2 Commits

Author: Caleb Connolly
SHA1: b54044a605
Message: hookscripts: glob hook directories
Date: 2023-04-18 17:53:37 +01:00

Author: Caleb Connolly
SHA1: f0544999db
Message: filelist/hookfiles: encapsulate dir searching out of slurpFiles
Move the directory searching / globbing code out of slurpFiles and into
the filelist module so it can be used elsewhere.
Date: 2023-04-07 02:56:38 +01:00
26 changed files with 474 additions and 906 deletions
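The second commit is easiest to follow from the consumer's side: the globbing that used to be buried in hookfiles' slurpFiles becomes filelist.FileList.AddGlobbed (shown later in this diff), which hookscripts then reuses for hook directories in the first commit. A rough sketch of the resulting call pattern, assuming code living inside this module (the packages are internal) and a hypothetical hook directory path:

```go
package main

import (
	"log"

	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
)

func main() {
	files := filelist.NewFileList()
	// Hypothetical hook directory; AddGlobbed expands the source via misc.GetFiles
	// and records a source -> destination pair for every match.
	if err := files.AddGlobbed("/etc/mkinitfs/hooks/20-debug", "/hooks/20-debug"); err != nil {
		log.Fatal(err)
	}
	for f := range files.IterItems() {
		log.Printf("%s -> %s", f.Source, f.Dest)
	}
}
```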


@@ -6,126 +6,43 @@ image: alpine:edge
variables:
GOFLAGS: "-buildvcs=false"
PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/mkinitfs-vendor-${CI_COMMIT_TAG}/${CI_COMMIT_TAG}"
CI_TRON_TEMPLATE_PROJECT: &ci-tron-template-project postmarketOS/ci-common
CI_TRON_JOB_TEMPLATE_PROJECT_URL: $CI_SERVER_URL/$CI_TRON_TEMPLATE_PROJECT
CI_TRON_JOB_TEMPLATE_COMMIT: &ci-tron-template-commit 7c95b5f2d53533e8722abf57c73e558168e811f3
include:
- project: *ci-tron-template-project
ref: *ci-tron-template-commit
file: '/ci-tron/common.yml'
stages: stages:
- lint
- build - build
- hardware tests
- vendor - vendor
- release - release
workflow:
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH == 'master'
- if: '$CI_COMMIT_TAG != null'
# defaults for "only"
# We need to run the CI jobs in a "merge request specific context", if CI is
# running in a merge request. Otherwise the environment variable that holds the
# merge request ID is not available. This means, we must set the "only"
# variable accordingly - and if we only do it for one job, all other jobs will
# not get executed. So have the defaults here, and use them in all jobs that
# should run on both the master branch, and in merge requests.
# https://docs.gitlab.com/ee/ci/merge_request_pipelines/index.html#excluding-certain-jobs
.only-default: &only-default
only:
- master
- merge_requests
- tags
build:
stage: build
<<: *only-default
variables:
GOTEST: "gotestsum --junitfile report.xml --format testname -- ./..."
parallel:
matrix:
- TAG: shared
- TAG: arm64
tags:
- $TAG
before_script:
- apk -q add go gotestsum staticcheck make scdoc
- apk -q add go staticcheck make scdoc
script:
- make test
- make
after_script:
- mkdir -p rootfs/usr/sbin
- cp mkinitfs rootfs/usr/sbin
artifacts: artifacts:
expire_in: 1 week expire_in: 1 week
reports:
junit: report.xml
paths:
- rootfs
.qemu-common:
variables:
DEVICE_NAME: qemu-$CPU_ARCH
KERNEL_VARIANT: lts
rules:
- if: '$CI_COMMIT_TAG != null'
when: never
.build-ci-tron-qemu:
stage: hardware tests
extends:
- .pmos-ci-tron-build-boot-artifacts
- .qemu-common
variables:
INSTALL_PACKAGES: device-${DEVICE_NAME} device-${DEVICE_NAME}-kernel-${KERNEL_VARIANT} postmarketos-mkinitfs-hook-ci
build-ci-tron-qemu-amd64:
extends:
- .build-ci-tron-qemu
needs:
- job: "build"
parallel:
matrix:
- TAG: shared
variables:
CPU_ARCH: amd64
build-ci-tron-qemu-aarch64:
extends:
- .build-ci-tron-qemu
needs:
- job: "build"
parallel:
matrix:
- TAG: arm64
variables:
CPU_ARCH: aarch64
.test-ci-tron-qemu:
stage: hardware tests
extends:
- .pmos-ci-tron-initramfs-test
- .qemu-common
dependencies: []
variables:
CI_TRON_KERNEL__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/vmlinuz-${KERNEL_VARIANT}"
CI_TRON_INITRAMFS__INITRAMFS__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/initramfs"
CI_TRON_KERNEL_CMDLINE__DEVICEINFO: 'console=tty1 console=ttyS0,115200 PMOS_FORCE_PARTITION_RESIZE'
test-ci-tron-qemu-amd64:
extends:
- .test-ci-tron-qemu
- .pmos-ci-tron-runner-qemu-amd64
needs:
- job: 'build-ci-tron-qemu-amd64'
artifacts: false
variables:
CPU_ARCH: amd64
test-ci-tron-qemu-aarch64:
extends:
- .test-ci-tron-qemu
- .pmos-ci-tron-runner-qemu-aarch64
needs:
- job: 'build-ci-tron-qemu-aarch64'
artifacts: false
variables:
CPU_ARCH: aarch64
vendor:
stage: vendor
image: alpine:latest
rules:
- if: '$CI_COMMIT_TAG != null'
only:
- tags
before_script:
- apk -q add curl go make
script:
@@ -137,8 +54,8 @@ vendor:
release:
stage: release
image: registry.gitlab.com/gitlab-org/release-cli:latest
rules:
- if: '$CI_COMMIT_TAG != null'
only:
- tags
script:
- |
release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \


@@ -12,13 +12,7 @@ GO?=go
GOFLAGS?=
LDFLAGS+=-s -w -X main.Version=$(VERSION)
RM?=rm -f
GOTESTOPTS?=-count=1 -race
GOTEST=go test -count=1 -race
GOTEST?=go test ./...
DISABLE_GOGC?=
ifeq ($(DISABLE_GOGC),1)
LDFLAGS+=-X main.DisableGC=true
endif
GOSRC!=find * -name '*.go' GOSRC!=find * -name '*.go'
GOSRC+=go.mod go.sum GOSRC+=go.mod go.sum
@@ -48,10 +42,10 @@ test:
fi
@staticcheck ./...
$(GOTEST) $(GOTESTOPTS)
@$(GOTEST) ./...
clean: clean:
$(RM) mkinitfs $(DOCS) $(RM) mkinitfs $(DOCS)
$(RM) $(VENDORED)* $(RM) $(VENDORED)*
install: $(DOCS) mkinitfs install: $(DOCS) mkinitfs


@@ -9,7 +9,6 @@ import (
"log" "log"
"os" "os"
"path/filepath" "path/filepath"
"runtime/debug"
"strings" "strings"
"time" "time"
@@ -21,6 +20,7 @@ import (
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookscripts" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookscripts"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/initramfs" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/initramfs"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/modules" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/modules"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/osksdl"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo" "gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
@@ -28,14 +28,8 @@ import (
// set at build time // set at build time
var Version string var Version string
var DisableGC string
func main() { func main() {
// To allow working around silly GC-related issues, like https://gitlab.com/qemu-project/qemu/-/issues/2560
if strings.ToLower(DisableGC) == "true" {
debug.SetGCPercent(-1)
}
retCode := 0 retCode := 0
defer func() { os.Exit(retCode) }() defer func() { os.Exit(retCode) }()
@@ -55,13 +49,18 @@ func main() {
log.Default().SetFlags(log.Lmicroseconds) log.Default().SetFlags(log.Lmicroseconds)
var devinfo deviceinfo.DeviceInfo
deverr_usr := devinfo.ReadDeviceinfo("/usr/share/deviceinfo/deviceinfo")
deverr_etc := devinfo.ReadDeviceinfo("/etc/deviceinfo")
if deverr_etc != nil && deverr_usr != nil {
log.Println("Error reading deviceinfo")
log.Println("\t/usr/share/deviceinfo/deviceinfo:", deverr_usr)
log.Println("\t/etc/deviceinfo:", deverr_etc)
deviceinfoFile := "/etc/deviceinfo"
if exists, err := misc.Exists(deviceinfoFile); !exists {
log.Printf("NOTE: %q not found, this file is required by mkinitfs.\n", deviceinfoFile)
return
} else if err != nil {
retCode = 1
log.Printf("received unexpected error when getting status for %q: %s", deviceinfoFile, err)
}
devinfo, err := deviceinfo.ReadDeviceinfo(deviceinfoFile)
if err != nil {
log.Println(err)
retCode = 1 retCode = 1
return return
} }
@@ -86,100 +85,52 @@ func main() {
defer func() { defer func() {
e := os.RemoveAll(workDir) e := os.RemoveAll(workDir)
if e != nil && err == nil { if e != nil && err == nil {
log.Println(e)
log.Println("unable to remove temporary work directory")
err = e
retCode = 1
} }
}() }()
log.Print("Generating for kernel version: ", kernVer) log.Print("Generating for kernel version: ", kernVer)
log.Print("Output directory: ", *outDir) log.Print("Output directory: ", *outDir)
//
// initramfs
//
// deviceinfo.InitfsCompression needs a little more post-processing
compressionFormat, compressionLevel := archive.ExtractFormatLevel(devinfo.InitfsCompression)
log.Printf("== Generating %s ==\n", "initramfs")
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
start := time.Now()
initramfsAr := archive.New(compressionFormat, compressionLevel)
initfs := initramfs.New([]filelist.FileLister{
hookdirs.New("/usr/share/mkinitfs/dirs"),
hookdirs.New("/etc/mkinitfs/dirs"),
hookfiles.New("/usr/share/mkinitfs/files"),
hookfiles.New("/etc/mkinitfs/files"),
hookscripts.New("/usr/share/mkinitfs/hooks", "/hooks"),
hookscripts.New("/etc/mkinitfs/hooks", "/hooks"),
hookscripts.New("/usr/share/mkinitfs/hooks-cleanup", "/hooks-cleanup"),
hookscripts.New("/etc/mkinitfs/hooks-cleanup", "/hooks-cleanup"),
//modules.New("/usr/share/mkinitfs/modules"),
modules.New("/etc/mkinitfs/modules"),
})
initfsExtra := initramfs.New([]filelist.FileLister{
hookfiles.New("/usr/share/mkinitfs/files-extra"),
hookfiles.New("/etc/mkinitfs/files-extra"),
hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
modules.New("/usr/share/mkinitfs/modules-extra"),
modules.New("/etc/mkinitfs/modules-extra"),
})
if err := initramfsAr.AddItems(initfs); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs")
retCode = 1
return
}
if err := generateArchive("initramfs", compressionFormat, compressionLevel, workDir, []filelist.FileLister{
hookdirs.New("/usr/share/mkinitfs/dirs"),
hookdirs.New("/etc/mkinitfs/dirs"),
hookfiles.New("/usr/share/mkinitfs/files"),
hookfiles.New("/etc/mkinitfs/files"),
hookscripts.New("/usr/share/mkinitfs/hooks", "/hooks"),
hookscripts.New("/etc/mkinitfs/hooks", "/hooks"),
modules.New(strings.Fields(devinfo.ModulesInitfs), "/usr/share/mkinitfs/modules"),
modules.New([]string{}, "/etc/mkinitfs/modules"),
}); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs")
retCode = 1
return
}
// deviceinfo.InitfsExtraCompression needs a little more post-processing
compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
if err := generateArchive("initramfs-extra", compressionFormat, compressionLevel, workDir, []filelist.FileLister{
hookfiles.New("/usr/share/mkinitfs/files-extra"),
hookfiles.New("/etc/mkinitfs/files-extra"),
hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
modules.New([]string{}, "/usr/share/mkinitfs/modules-extra"),
modules.New([]string{}, "/etc/mkinitfs/modules-extra"),
osksdl.New(devinfo.MesaDriver),
}); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs-extra")
retCode = 1
return
}
// Include initramfs-extra files in the initramfs if not making a separate
// archive
if !devinfo.CreateInitfsExtra {
if err := initramfsAr.AddItems(initfsExtra); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs")
retCode = 1
return
}
}
if err := initramfsAr.Write(filepath.Join(workDir, "initramfs"), os.FileMode(0644)); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs")
retCode = 1
return
}
misc.TimeFunc(start, "initramfs")
if devinfo.CreateInitfsExtra {
//
// initramfs-extra
//
// deviceinfo.InitfsExtraCompression needs a little more post-processing
compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
log.Printf("== Generating %s ==\n", "initramfs-extra")
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
start = time.Now()
initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs-extra")
retCode = 1
return
}
if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs-extra")
retCode = 1
return
}
misc.TimeFunc(start, "initramfs-extra")
}
// Final processing of initramfs / kernel is done by boot-deploy // Final processing of initramfs / kernel is done by boot-deploy
if !disableBootDeploy { if !disableBootDeploy {
if err := bootDeploy(workDir, *outDir, devinfo); err != nil {
if err := bootDeploy(workDir, *outDir, devinfo.UbootBoardname); err != nil {
log.Println(err) log.Println(err)
log.Println("boot-deploy failed") log.Println("boot-deploy failed")
retCode = 1 retCode = 1
@@ -188,10 +139,30 @@ func main() {
} }
} }
func bootDeploy(workDir string, outDir string, devinfo deviceinfo.DeviceInfo) error {
func bootDeploy(workDir, outDir, ubootBoardname string) error {
log.Print("== Using boot-deploy to finalize/install files ==")
defer misc.TimeFunc(time.Now(), "boot-deploy")
bd := bootdeploy.New(workDir, outDir, devinfo)
bd := bootdeploy.New(workDir, outDir, ubootBoardname)
return bd.Run()
}
func generateArchive(name string, format archive.CompressFormat, level archive.CompressLevel, path string, features []filelist.FileLister) error {
log.Printf("== Generating %s ==\n", name)
log.Printf("- Using compression format %s with level %q\n", format, level)
defer misc.TimeFunc(time.Now(), name)
a := archive.New(format, level)
fs := initramfs.New(features)
if err := a.AddItems(fs); err != nil {
return err
}
log.Println("- Writing and verifying archive: ", name)
if err := a.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
return err
}
return nil
}


@@ -38,14 +38,13 @@ Design goals of this project are:
The canonical deviceinfo "specification" is at
https://wiki.postmarketos.org/wiki/Deviceinfo_reference
mkinitfs reads deviceinfo values from */usr/share/deviceinfo/deviceinfo* and
*/etc/deviceinfo*, in that order. The following variables
mkinitfs reads deviceinfo values from */etc/deviceinfo*. The following variables
are *required* by mkinitfs:
- deviceinfo_create_initfs_extra
- deviceinfo_generate_systemd_boot
- deviceinfo_initfs_compression
- deviceinfo_initfs_extra_compression
- deviceinfo_mesa_driver
- deviceinfo_modules_initfs
- deviceinfo_uboot_boardname
It is a design goal to keep the number of required variables from deviceinfo to
@@ -55,36 +54,6 @@ a bare minimum, and to require only variables that don't hold lists of things.
necessary tools to extract the configured archive format are in the initramfs
archive.
# ARCHIVE COMPRESSION
Archive compression parameters are specified in the
*deviceinfo_initfs_compression* and *deviceinfo_initfs_extra_compression*
deviceinfo variables. Their values do not have to match, but special
consideration should be taken since some formats may require additional kernel
options or tools in the initramfs to support it.
Supported compression *formats* for mkinitfs are:
- gzip
- lz4
- lzma
- none
- zstd
Supported compression *levels* for mkinitfs:
- best
- default
- fast
The value of these variables follows this syntax: *<format>:<level>*. For
example, *zstd* with the *fast* compression level would be:
*deviceinfo_initfs_compression="zstd:fast"*
Defaults to *gzip* and *default* for both archives if format and/or level is
unsupported or omitted.
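For orientation, the *<format>:<level>* parsing described in this (removed) section maps to archive.ExtractFormatLevel, which both sides of this diff keep. A minimal sketch, assuming the archive package's internal import path and using the example value from the text above:

```go
package main

import (
	"fmt"

	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
)

func main() {
	// "zstd:fast" is only an example; any supported "<format>:<level>" string
	// is split the same way, with gzip/default as the documented fallback.
	format, level := archive.ExtractFormatLevel("zstd:fast")
	fmt.Println(format, level) // zstd fast
}
```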
# DIRECTORIES
The following directories are used by mkinitfs to generate the initramfs and
@@ -94,7 +63,7 @@ it are for constructing the initramfs archive.
Configuration under */usr/share/mkinitfs* is intended to be managed by
distributions, while configuration under */etc/mkinitfs* is for users to
create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, and then from */etc/mkinitfs*.
## /usr/share/mkinitfs/files, /etc/mkinitfs/files
## /usr/share/mkinitfs/files-extra, /etc/mkinitfs/files-extra
@@ -130,12 +99,8 @@ create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, an
path(s) under the relevant directory in */etc/mkinitfs*, and changing
the destination path.
Any lines in these files that start with *#* are considered comments, and
skipped.
## /usr/share/mkinitfs/hooks, /etc/mkinitfs/hooks
## /usr/share/mkinitfs/hooks-cleanup, /etc/mkinitfs/hooks-cleanup
## /usr/share/mkinitfs/hooks-extra*, /etc/mkinitfs/hooks-extra
## /usr/share/mkinitfs/hooks-extra, /etc/mkinitfs/hooks-extra
Any files listed under these directories are copied as-is into the
relevant archives. Hooks are generally script files, but how they are
@@ -148,25 +113,19 @@ create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, an
## /usr/share/mkinitfs/modules, /etc/mkinitfs/modules
## /usr/share/mkinitfs/modules-extra, /etc/mkinitfs/modules-extra
Files with the *.modules* extension in these directories are lists of
Files with the *.modules* extention in these directories are lists of
kernel modules to include in the initramfs. Individual modules and
directories can be listed in the files here. Globbing is also supported.
Modules are installed in the initramfs archive under the same path they
exist on the system where mkinitfs is executed.
Any lines in these files that start with *#* are considered comments, and
skipped.
## /usr/share/mkinitfs/dirs, /etc/mkinitfs/dirs
Files with the *.dirs* extension in these directories are lists of
directories to create within the initramfs. There is no *-extra* variant,
since directories are of negligible size.
Any lines in these files that start with *#* are considered comments, and
skipped.
# BOOT-DEPLOY
After generating archives, mkinitfs will execute *boot-deploy*, using *$PATH* to
@@ -177,7 +136,7 @@ search for the app. The following commandline options are passed to it:
Currently this is hardcoded to be "initramfs"
*-k* <kernel filename>
*-d* <work directory>
Path to the directory containing the build artifacts from mkinitfs.

go.mod

@@ -5,15 +5,6 @@ go 1.20
require (
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
github.com/klauspost/compress v1.15.12
github.com/pierrec/lz4/v4 v4.1.17
github.com/ulikunitz/xz v0.5.10
golang.org/x/sys v0.18.0
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
)
require (
github.com/mvdan/sh v2.6.4+incompatible // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/term v0.18.0 // indirect
mvdan.cc/sh v2.6.4+incompatible // indirect
) )

go.sum

@@ -2,21 +2,7 @@ github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RS
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/mvdan/sh v2.6.4+incompatible h1:D4oEWW0J8cL7zeQkrXw76IAYXF0mJfDaBwjgzmKb6zs=
github.com/mvdan/sh v2.6.4+incompatible/go.mod h1:kipHzrJQZEDCMTNRVRAlMMFjqHEYrthfIlFkJSrmDZE=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
mvdan.cc/sh v2.6.4+incompatible h1:eD6tDeh0pw+/TOTI1BBEryZ02rD2nMcFsgcvde7jffM=
mvdan.cc/sh v2.6.4+incompatible/go.mod h1:IeeQbZq+x2SUGBensq/jge5lLQbS3XT2ktyp3wrt4x8=


@@ -18,7 +18,6 @@ import (
"github.com/cavaliercoder/go-cpio" "github.com/cavaliercoder/go-cpio"
"github.com/klauspost/compress/zstd" "github.com/klauspost/compress/zstd"
"github.com/pierrec/lz4/v4"
"github.com/ulikunitz/xz" "github.com/ulikunitz/xz"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
@@ -29,7 +28,6 @@ type CompressFormat string
const ( const (
FormatGzip CompressFormat = "gzip" FormatGzip CompressFormat = "gzip"
FormatLzma CompressFormat = "lzma" FormatLzma CompressFormat = "lzma"
FormatLz4 CompressFormat = "lz4"
FormatZstd CompressFormat = "zstd" FormatZstd CompressFormat = "zstd"
FormatNone CompressFormat = "none" FormatNone CompressFormat = "none"
) )
@@ -106,7 +104,6 @@ func ExtractFormatLevel(s string) (format CompressFormat, level CompressLevel) {
case FormatLzma: case FormatLzma:
log.Println("Format lzma doesn't support a compression level, using default settings") log.Println("Format lzma doesn't support a compression level, using default settings")
level = LevelDefault level = LevelDefault
case FormatLz4:
case FormatNone: case FormatNone:
case FormatZstd: case FormatZstd:
default: default:
@@ -203,44 +200,9 @@ func (archive *Archive) AddItems(flister filelist.FileLister) error {
return nil return nil
} }
// AddItemsExclude is like AddItems, but takes a second FileLister that lists
// items that should not be added to the archive from the first FileLister
func (archive *Archive) AddItemsExclude(flister filelist.FileLister, exclude filelist.FileLister) error {
list, err := flister.List()
if err != nil {
return err
}
excludeList, err := exclude.List()
if err != nil {
return err
}
for i := range list.IterItems() {
dest, found := excludeList.Get(i.Source)
if found {
if i.Dest != dest {
found = false
}
}
if !found {
if err := archive.AddItem(i.Source, i.Dest); err != nil {
return err
}
}
}
return nil
}
// Adds the given file or directory at "source" to the archive at "dest" // Adds the given file or directory at "source" to the archive at "dest"
func (archive *Archive) AddItem(source string, dest string) error { func (archive *Archive) AddItem(source string, dest string) error {
if osutil.HasMergedUsr() {
source = osutil.MergeUsr(source)
dest = osutil.MergeUsr(dest)
}
sourceStat, err := os.Lstat(source) sourceStat, err := os.Lstat(source)
if err != nil { if err != nil {
e, ok := err.(*os.PathError) e, ok := err.(*os.PathError)
@@ -251,12 +213,6 @@ func (archive *Archive) AddItem(source string, dest string) error {
return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err) return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
} }
// A symlink to a directory doesn't have the os.ModeDir bit set, so we need
// to check if it's a symlink first
if sourceStat.Mode()&os.ModeSymlink != 0 {
return archive.addSymlink(source, dest)
}
if sourceStat.Mode()&os.ModeDir != 0 { if sourceStat.Mode()&os.ModeDir != 0 {
return archive.addDir(dest) return archive.addDir(dest)
} }
@@ -264,45 +220,6 @@ func (archive *Archive) AddItem(source string, dest string) error {
return archive.addFile(source, dest) return archive.addFile(source, dest)
} }
func (archive *Archive) addSymlink(source string, dest string) error {
target, err := os.Readlink(source)
if err != nil {
log.Print("addSymlink: failed to get symlink target for: ", source)
return err
}
// Make sure we pick up the symlink target too
targetAbs := target
if filepath.Dir(target) == "." {
// relative symlink, make it absolute so we can add the target to the archive
targetAbs = filepath.Join(filepath.Dir(source), target)
}
if !filepath.IsAbs(targetAbs) {
targetAbs, err = osutil.RelativeSymlinkTargetToDir(targetAbs, filepath.Dir(source))
if err != nil {
return err
}
}
archive.AddItem(targetAbs, targetAbs)
// Now add the symlink itself
destFilename := strings.TrimPrefix(dest, "/")
archive.items.add(archiveItem{
sourcePath: source,
header: &cpio.Header{
Name: destFilename,
Linkname: target,
Mode: 0644 | cpio.ModeSymlink,
Size: int64(len(target)),
},
})
return nil
}
func (archive *Archive) addFile(source string, dest string) error { func (archive *Archive) addFile(source string, dest string) error {
if err := archive.addDir(filepath.Dir(dest)); err != nil { if err := archive.addDir(filepath.Dir(dest)); err != nil {
return err return err
@@ -314,6 +231,42 @@ func (archive *Archive) addFile(source string, dest string) error {
return err return err
} }
// Symlink: write symlink to archive then set 'file' to link target
if sourceStat.Mode()&os.ModeSymlink != 0 {
// log.Printf("File %q is a symlink", file)
target, err := os.Readlink(source)
if err != nil {
log.Print("addFile: failed to get symlink target: ", source)
return err
}
destFilename := strings.TrimPrefix(dest, "/")
archive.items.add(archiveItem{
sourcePath: source,
header: &cpio.Header{
Name: destFilename,
Linkname: target,
Mode: 0644 | cpio.ModeSymlink,
Size: int64(len(target)),
// Checksum: 1,
},
})
if filepath.Dir(target) == "." {
target = filepath.Join(filepath.Dir(source), target)
}
// make sure target is an absolute path
if !filepath.IsAbs(target) {
target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
if err != nil {
return err
}
}
err = archive.addFile(target, target)
return err
}
destFilename := strings.TrimPrefix(dest, "/") destFilename := strings.TrimPrefix(dest, "/")
archive.items.add(archiveItem{ archive.items.add(archiveItem{
@@ -363,23 +316,6 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err erro
if err != nil { if err != nil {
return err return err
} }
case FormatLz4:
// The default compression for the lz4 library is Fast, and
// they don't define a Default level otherwise
level := lz4.Fast
switch archive.compress_level {
case LevelBest:
level = lz4.Level9
case LevelFast:
level = lz4.Fast
}
var writer = lz4.NewWriter(fd)
err = writer.Apply(lz4.LegacyOption(true), lz4.CompressionLevelOption(level))
if err != nil {
return err
}
compressor = writer
case FormatNone: case FormatNone:
compressor = fd compressor = fd
case FormatZstd: case FormatZstd:
@@ -416,12 +352,6 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err erro
} }
func (archive *Archive) writeCpio() error { func (archive *Archive) writeCpio() error {
// Just in case
if osutil.HasMergedUsr() {
archive.addSymlink("/bin", "/bin")
archive.addSymlink("/sbin", "/sbin")
archive.addSymlink("/lib", "/lib")
}
// having a transient function for actually adding files to the archive
// allows the deferred fd.close to run after every copy and prevent having
// tons of open file handles until the copying is all done
@@ -436,19 +366,19 @@ func (archive *Archive) writeCpio() error {
if header.Mode.IsRegular() {
fd, err := os.Open(source)
if err != nil {
return fmt.Errorf("archive.writeCpio: Unable to open file %q, %w", source, err)
return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
}
defer fd.Close()
if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
return fmt.Errorf("archive.writeCpio: Couldn't process %q: %w", source, err)
return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
}
} else if header.Linkname != "" {
// the contents of a symlink is just need the link name
if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %q -> %q: %w", source, header.Linkname, err)
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
}
} else {
return fmt.Errorf("archive.writeCpio: unknown type for file: %q: %d", source, header.Mode)
return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
}
}
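Since the hunk above touches several archive internals, here is a short sketch of the public flow they sit behind (New, AddItems, Write), mirroring generateArchive from main.go earlier in this diff. The archive import path, the gzip/default values, the lister and the output path are assumptions for illustration:

```go
package main

import (
	"log"
	"os"

	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookfiles"
	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/initramfs"
)

func main() {
	// Build an archive from one example lister, then write it out.
	a := archive.New(archive.FormatGzip, archive.LevelDefault)
	fs := initramfs.New([]filelist.FileLister{
		hookfiles.New("/etc/mkinitfs/files"),
	})
	if err := a.AddItems(fs); err != nil {
		log.Fatal(err)
	}
	if err := a.Write("/tmp/initramfs", os.FileMode(0644)); err != nil {
		log.Fatal(err)
	}
}
```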

View File

@@ -249,12 +249,6 @@ func TestExtractFormatLevel(t *testing.T) {
expectedFormat: FormatLzma, expectedFormat: FormatLzma,
expectedLevel: LevelDefault, expectedLevel: LevelDefault,
}, },
{
name: "lz4, fast",
in: "lz4:fast",
expectedFormat: FormatLz4,
expectedLevel: LevelFast,
},
{ {
name: "none", name: "none",
in: "none", in: "none",


@@ -10,32 +10,32 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"strings" "strings"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
) )
type BootDeploy struct {
inDir string
outDir string
devinfo deviceinfo.DeviceInfo
ubootBoardname string
}
// New returns a new BootDeploy, which then runs:
//
// boot-deploy -d indir -o outDir
//
// devinfo is used to access some deviceinfo values, such as UbootBoardname
// and GenerateSystemdBoot
// ubootBoardname is used for copying in some u-boot files prior to running
// boot-deploy. This is optional, passing an empty string is ok if this is not
// needed.
func New(inDir string, outDir string, devinfo deviceinfo.DeviceInfo) *BootDeploy {
func New(inDir, outDir, ubootBoardname string) *BootDeploy {
return &BootDeploy{
inDir: inDir,
outDir: outDir,
devinfo: devinfo,
ubootBoardname: ubootBoardname,
}
}
func (b *BootDeploy) Run() error {
if err := copyUbootFiles(b.inDir, b.devinfo.UbootBoardname); errors.Is(err, os.ErrNotExist) {
if err := copyUbootFiles(b.inDir, b.ubootBoardname); errors.Is(err, os.ErrNotExist) {
log.Println("u-boot files copying skipped: ", err)
} else {
if err != nil {
@@ -43,9 +43,15 @@ func (b *BootDeploy) Run() error {
} }
} }
kernels, err := getKernelPath(b.outDir, b.devinfo.GenerateSystemdBoot == "true")
if err != nil {
return bootDeploy(b.inDir, b.outDir)
}
return err
func bootDeploy(workDir string, outDir string) error {
// boot-deploy expects the kernel to be in the same dir as initramfs.
// Assume that the kernel is in the output dir...
kernels, _ := filepath.Glob(filepath.Join(outDir, "vmlinuz*"))
if len(kernels) == 0 {
return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
} }
// Pick a kernel that does not have suffixes added by boot-deploy // Pick a kernel that does not have suffixes added by boot-deploy
@@ -65,7 +71,7 @@ func (b *BootDeploy) Run() error {
defer kernFd.Close() defer kernFd.Close()
kernFilename := path.Base(kernFile) kernFilename := path.Base(kernFile)
kernFileCopy, err := os.Create(filepath.Join(b.inDir, kernFilename))
kernFileCopy, err := os.Create(filepath.Join(workDir, kernFilename))
if err != nil { if err != nil {
return err return err
} }
@@ -78,17 +84,12 @@ func (b *BootDeploy) Run() error {
} }
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
args := []string{
"-i", "initramfs",
"-k", kernFilename,
"-d", b.inDir,
"-o", b.outDir,
}
if b.devinfo.CreateInitfsExtra {
args = append(args, "initramfs-extra")
}
cmd := exec.Command("boot-deploy", args...)
cmd := exec.Command("boot-deploy",
"-i", "initramfs",
"-k", kernFilename,
"-d", workDir,
"-o", outDir,
"initramfs-extra")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@@ -99,25 +100,6 @@ func (b *BootDeploy) Run() error {
return nil return nil
} }
func getKernelPath(outDir string, zboot bool) ([]string, error) {
var kernels []string
if zboot {
kernels, _ = filepath.Glob(filepath.Join(outDir, "linux.efi"))
if len(kernels) > 0 {
return kernels, nil
}
// else fallback to vmlinuz* below
}
kernFile := "vmlinuz*"
kernels, _ = filepath.Glob(filepath.Join(outDir, kernFile))
if len(kernels) == 0 {
return nil, errors.New("Unable to find any kernels at " + filepath.Join(outDir, kernFile))
}
return kernels, nil
}
// Copy copies the file at srcFile path to a new file at dstFile path // Copy copies the file at srcFile path to a new file at dstFile path
func copy(srcFile, dstFile string) error { func copy(srcFile, dstFile string) error {
out, err := os.Create(dstFile) out, err := os.Create(dstFile)
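The constructor change above is visible at the call site in main.go earlier in this diff: one side passes the whole deviceinfo.DeviceInfo, the other only deviceinfo_uboot_boardname. A hedged sketch of the string-based variant, with an assumed internal import path and example directories:

```go
package main

import (
	"log"

	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/bootdeploy"
)

func main() {
	// Work and output directories are example values; the third argument is
	// deviceinfo_uboot_boardname and may be empty when no u-boot files are needed.
	bd := bootdeploy.New("/tmp/mkinitfs-work", "/boot", "")
	if err := bd.Run(); err != nil {
		log.Println("boot-deploy failed:", err)
	}
}
```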


@@ -1,6 +1,12 @@
package filelist
import "sync"
import (
"fmt"
"path/filepath"
"sync"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
)
type FileLister interface { type FileLister interface {
List() (*FileList, error) List() (*FileList, error)
@@ -45,6 +51,26 @@ func (f *FileList) Import(src *FileList) {
} }
} }
func (f *FileList) AddGlobbed(src string, dest string) error {
fFiles, err := misc.GetFiles([]string{src}, true)
if err != nil {
return fmt.Errorf("unable to add %q: %w", src, err)
}
// loop over all returned files from GetFile
for _, file := range fFiles {
if len(fFiles) > 1 {
// Glob with arbitrary subdirectories, so we need to
// remove the src path and prepend the dest path
f.Add(file, filepath.Join(dest, file[len(src):]))
} else {
// dest path specified, and only 1 file
f.Add(file, dest)
}
}
return nil
}
// iterate through the list and and send each one as a new File over the // iterate through the list and and send each one as a new File over the
// returned channel // returned channel
func (f *FileList) IterItems() <-chan File { func (f *FileList) IterItems() <-chan File {


@@ -6,7 +6,6 @@ import (
"log" "log"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
) )
@@ -44,11 +43,7 @@ func (h *HookDirs) List() (*filelist.FileList, error) {
s := bufio.NewScanner(f) s := bufio.NewScanner(f)
for s.Scan() { for s.Scan() {
dir := strings.TrimSpace(s.Text())
dir := s.Text()
if len(dir) == 0 || strings.HasPrefix(dir, "#") {
continue
}
files.Add(dir, dir) files.Add(dir, dir)
} }
} }


@@ -10,8 +10,6 @@ import (
"strings" "strings"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist" "gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
) )
type HookFiles struct { type HookFiles struct {
@@ -59,32 +57,15 @@ func slurpFiles(fd io.Reader) (*filelist.FileList, error) {
s := bufio.NewScanner(fd)
for s.Scan() {
line := strings.TrimSpace(s.Text())
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
src, dest, has_dest := strings.Cut(line, ":")
if osutil.HasMergedUsr() {
src = osutil.MergeUsr(src)
}
fFiles, err := misc.GetFiles([]string{src}, true)
src, dest, has_dest := strings.Cut(s.Text(), ":")
if !has_dest {
dest = src
}
err := files.AddGlobbed(src, dest)
if err != nil {
return nil, fmt.Errorf("unable to add %q: %w", src, err)
return nil, err
}
// loop over all returned files from GetFile
for _, file := range fFiles {
if !has_dest {
files.Add(file, file)
} else if len(fFiles) > 1 {
// Don't support specifying dest if src was a glob
// NOTE: this could support this later...
files.Add(file, file)
} else {
// dest path specified, and only 1 file
files.Add(file, dest)
}
} }
} }


@@ -35,8 +35,13 @@ func (h *HookScripts) List() (*filelist.FileList, error) {
} }
for _, file := range fileInfo { for _, file := range fileInfo {
path := filepath.Join(h.scriptsDir, file.Name()) path := filepath.Join(h.scriptsDir, file.Name())
log.Printf("-- Including script: %s\n", path)
files.Add(path, filepath.Join(h.destPath, file.Name()))
if file.IsDir() {
log.Printf("-- Including dir %s\n", path)
files.AddGlobbed(filepath.Join(path, "*"), filepath.Join(h.destPath, file.Name()))
} else {
log.Printf("-- Including script: %s\n", path)
files.Add(path, filepath.Join(h.destPath, file.Name()))
}
} }
return files, nil return files, nil
} }
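The first commit ("hookscripts: glob hook directories") is easiest to see as a mapping. A hedged sketch with hypothetical paths, assuming code inside this module; exact destinations come from the AddGlobbed call shown above:

```go
package main

import (
	"log"

	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookscripts"
)

func main() {
	// With a layout like (hypothetical):
	//   /etc/mkinitfs/hooks/10-resize.sh
	//   /etc/mkinitfs/hooks/20-debug/run.sh
	// plain files are still added one-to-one under /hooks, while a directory's
	// contents are now pulled in via AddGlobbed and land under /hooks/20-debug/.
	list, err := hookscripts.New("/etc/mkinitfs/hooks", "/hooks").List()
	if err != nil {
		log.Fatal(err)
	}
	for f := range list.IterItems() {
		log.Printf("%s -> %s", f.Source, f.Dest)
	}
}
```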


@@ -9,7 +9,6 @@ import (
// combining the output from them. // combining the output from them.
type Initramfs struct { type Initramfs struct {
features []filelist.FileLister features []filelist.FileLister
files *filelist.FileList
} }
// New returns a new Initramfs that generate a list of files based on the given // New returns a new Initramfs that generate a list of files based on the given
@@ -21,18 +20,15 @@ func New(features []filelist.FileLister) *Initramfs {
} }
func (i *Initramfs) List() (*filelist.FileList, error) {
if i.files != nil {
return i.files, nil
}
i.files = filelist.NewFileList()
files := filelist.NewFileList()
for _, f := range i.features {
list, err := f.List()
if err != nil {
return nil, err
}
i.files.Import(list)
files.Import(list)
}
return i.files, nil
return files, nil
}


@@ -17,11 +17,14 @@ import (
type Modules struct { type Modules struct {
modulesListPath string modulesListPath string
modulesList []string
} }
// New returns a new Modules that will read in lists of kernel modules in the given path.
// New returns a new Modules that will use the given moduleto provide a list
// of script files.
func New(modulesListPath string) *Modules {
func New(modulesList []string, modulesListPath string) *Modules {
return &Modules{ return &Modules{
modulesList: modulesList,
modulesListPath: modulesListPath, modulesListPath: modulesListPath,
} }
} }
@@ -33,14 +36,8 @@ func (m *Modules) List() (*filelist.FileList, error) {
} }
files := filelist.NewFileList() files := filelist.NewFileList()
libDir := "/usr/lib/modules"
if exists, err := misc.Exists(libDir); !exists {
libDir = "/lib/modules"
} else if err != nil {
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", libDir, err)
}
modDir := filepath.Join(libDir, kernVer)
modDir := filepath.Join("/lib/modules", kernVer)
if exists, err := misc.Exists(modDir); !exists { if exists, err := misc.Exists(modDir); !exists {
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message // dir /lib/modules/<kernel> if kernel built without module support, so just print a message
log.Printf("-- kernel module directory not found: %q, not including modules", modDir) log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
@@ -55,6 +52,20 @@ func (m *Modules) List() (*filelist.FileList, error) {
files.Add(file, file) files.Add(file, file)
} }
// slurp up given list of modules
if len(m.modulesList) > 0 {
log.Printf("-- Including kernel modules from deviceinfo")
for _, module := range m.modulesList {
if modFilelist, err := getModule(module, modDir); err != nil {
return nil, fmt.Errorf("unable to get modules from deviceinfo: %w", err)
} else {
for _, file := range modFilelist {
files.Add(file, file)
}
}
}
}
// slurp up modules from lists in modulesListPath // slurp up modules from lists in modulesListPath
log.Printf("- Searching for kernel modules from %s", m.modulesListPath) log.Printf("- Searching for kernel modules from %s", m.modulesListPath)
fileInfo, err := os.ReadDir(m.modulesListPath) fileInfo, err := os.ReadDir(m.modulesListPath)
@@ -83,10 +94,7 @@ func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
files := filelist.NewFileList() files := filelist.NewFileList()
s := bufio.NewScanner(fd) s := bufio.NewScanner(fd)
for s.Scan() { for s.Scan() {
line := strings.TrimSpace(s.Text())
line := s.Text()
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
dir, file := filepath.Split(line) dir, file := filepath.Split(line)
if file == "" { if file == "" {
// item is a directory // item is a directory
@@ -103,8 +111,8 @@ func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
} }
} else if dir == "" { } else if dir == "" {
// item is a module name // item is a module name
if modFilelist, err := getModule(line, modDir); err != nil {
if modFilelist, err := getModule(s.Text(), modDir); err != nil {
return nil, fmt.Errorf("unable to get module file %q: %w", line, err)
return nil, fmt.Errorf("unable to get module file %q: %w", s.Text(), err)
} else { } else {
for _, file := range modFilelist { for _, file := range modFilelist {
files.Add(file, file) files.Add(file, file)
@@ -124,9 +132,7 @@ func getModulesInDir(modPath string) (files []string, err error) {
// Unable to walk path // Unable to walk path
return err return err
} }
// this assumes module names are in the format <name>.ko[.format],
// where ".format" (e.g. ".gz") is optional.
if !strings.Contains(".ko", path) {
if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
return nil return nil
} }
files = append(files, path) files = append(files, path)
@@ -188,12 +194,7 @@ func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
s := bufio.NewScanner(modulesDep) s := bufio.NewScanner(modulesDep)
for s.Scan() { for s.Scan() {
line := strings.TrimSpace(s.Text())
fields := strings.Fields(s.Text())
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
fields := strings.Fields(line)
if len(fields) == 0 { if len(fields) == 0 {
continue continue
} }
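The new modules.New signature pairs with the call sites in main.go shown earlier in this diff: the slice seeds modules straight from deviceinfo_modules_initfs, while the path argument still points at a directory of *.modules lists. A hedged sketch with example module names:

```go
package main

import (
	"log"
	"strings"

	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/modules"
)

func main() {
	// Example value for deviceinfo_modules_initfs; real values come from deviceinfo.
	modulesInitfs := "panel_example touchscreen_example"
	m := modules.New(strings.Fields(modulesInitfs), "/usr/share/mkinitfs/modules")
	if _, err := m.List(); err != nil {
		log.Fatal(err)
	}
	// An empty slice keeps the old behaviour of only reading the *.modules lists:
	_ = modules.New([]string{}, "/etc/mkinitfs/modules")
}
```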


@@ -18,7 +18,6 @@ func TestStripExts(t *testing.T) {
{"another_file", "another_file"}, {"another_file", "another_file"},
{"a.b.c.d.e.f.g.h.i", "a"}, {"a.b.c.d.e.f.g.h.i", "a"},
{"virtio_blk.ko", "virtio_blk"}, {"virtio_blk.ko", "virtio_blk"},
{"virtio_blk.ko ", "virtio_blk"},
} }
for _, table := range tables { for _, table := range tables {
out := stripExts(table.in) out := stripExts(table.in)


@@ -0,0 +1,158 @@
package osksdl
import (
"bufio"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
)
type OskSdl struct {
mesaDriver string
}
// New returns a new HookScripts that will use the given path to provide a list
// of script files.
func New(mesaDriverName string) *OskSdl {
return &OskSdl{
mesaDriver: mesaDriverName,
}
}
// Get a list of files and their dependencies related to supporting rootfs full
// disk (d)encryption
func (s *OskSdl) List() (*filelist.FileList, error) {
files := filelist.NewFileList()
if exists, err := misc.Exists("/usr/bin/osk-sdl"); !exists {
return files, nil
} else if err != nil {
return files, fmt.Errorf("received unexpected error when getting status for %q: %w", "/usr/bin/osk-sdl", err)
}
log.Println("- Including osk-sdl support")
confFiles := []string{
"/etc/osk.conf",
"/etc/ts.conf",
"/etc/pointercal",
"/etc/fb.modes",
"/etc/directfbrc",
}
confFileList, err := misc.GetFiles(confFiles, false)
if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
}
for _, file := range confFileList {
files.Add(file, file)
}
// osk-sdl
oskFiles := []string{
"/usr/bin/osk-sdl",
"/sbin/cryptsetup",
"/usr/lib/libGL.so.1",
}
if oskFileList, err := misc.GetFiles(oskFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
for _, file := range oskFileList {
files.Add(file, file)
}
}
fontFile, err := getOskConfFontPath("/etc/osk.conf")
if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file %q: %w", fontFile, err)
}
files.Add(fontFile, fontFile)
// Directfb
dfbFiles := []string{}
err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
if filepath.Ext(path) == ".so" {
dfbFiles = append(dfbFiles, path)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file %w", err)
}
if dfbFileList, err := misc.GetFiles(dfbFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
for _, file := range dfbFileList {
files.Add(file, file)
}
}
// tslib
tslibFiles := []string{}
err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
if filepath.Ext(path) == ".so" {
tslibFiles = append(tslibFiles, path)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file: %w", err)
}
libts, _ := filepath.Glob("/usr/lib/libts*")
tslibFiles = append(tslibFiles, libts...)
if tslibFileList, err := misc.GetFiles(tslibFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
for _, file := range tslibFileList {
files.Add(file, file)
}
}
// mesa hw accel
if s.mesaDriver != "" {
mesaFiles := []string{
"/usr/lib/libEGL.so.1",
"/usr/lib/libGLESv2.so.2",
"/usr/lib/libgbm.so.1",
"/usr/lib/libudev.so.1",
"/usr/lib/xorg/modules/dri/" + s.mesaDriver + "_dri.so",
}
if mesaFileList, err := misc.GetFiles(mesaFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
for _, file := range mesaFileList {
files.Add(file, file)
}
}
}
return files, nil
}
func getOskConfFontPath(oskConfPath string) (string, error) {
var path string
f, err := os.Open(oskConfPath)
if err != nil {
return path, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
fields := strings.Fields(s.Text())
// "key = val" is 3 fields
if len(fields) > 2 && fields[0] == "keyboard-font" {
path = fields[2]
}
}
if exists, err := misc.Exists(path); !exists {
return path, fmt.Errorf("unable to find font: %s", path)
} else if err != nil {
return path, fmt.Errorf("received unexpected error when getting status for %q: %w", path, err)
}
return path, nil
}


@@ -3,7 +3,6 @@ package misc
import ( import (
"debug/elf" "debug/elf"
"fmt" "fmt"
"io/fs"
"os" "os"
"path/filepath" "path/filepath"
@@ -40,42 +39,12 @@ func getFile(file string, required bool) (files []string, err error) {
return RemoveDuplicates(files), nil return RemoveDuplicates(files), nil
} }
// If the file is a symlink we need to do this to prevent an infinite recursion
// loop:
// Symlinks need special handling to prevent infinite recursion:
// 1) add the symlink to the list of files
// 2) set file to dereferenced target
// 4) continue this function to either walk it if the target is a dir or add the
// target to the list of files
if s, err := os.Lstat(file); err == nil {
if s.Mode()&fs.ModeSymlink != 0 {
files = append(files, file)
if target, err := filepath.EvalSymlinks(file); err != nil {
return files, err
} else {
file = target
}
}
}
fileInfo, err := os.Stat(file) fileInfo, err := os.Stat(file)
if err != nil { if err != nil {
// Check if there is a Zstd-compressed version of the file
fileZstd := file + ".zst" // .zst is the extension used by linux-firmware
if required {
return files, fmt.Errorf("getFile: failed to stat file %q: %w", file, err)
fileInfoZstd, errZstd := os.Stat(fileZstd)
if errZstd == nil {
file = fileZstd
fileInfo = fileInfoZstd
// Unset nil so we don't retain the error from the os.Stat call for the uncompressed version.
err = nil
} else {
if required {
return files, fmt.Errorf("getFile: failed to stat file %q: %w (also tried %q: %w)", file, err, fileZstd, errZstd)
}
return files, nil
} }
return files, nil
} }
if fileInfo.IsDir() { if fileInfo.IsDir() {
@@ -139,7 +108,6 @@ func getDeps(file string, parents map[string]struct{}) (files []string, err erro
"/usr/lib", "/usr/lib",
"/lib", "/lib",
"/usr/lib/expect*", "/usr/lib/expect*",
"/usr/lib/systemd",
} }
for _, lib := range libs { for _, lib := range libs {
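One behaviour worth calling out from the removed block above (it is also covered by the deleted test later in this diff): when a requested firmware file is missing, getFile falls back to a sibling with a .zst suffix. A hedged sketch through the exported wrapper, with a hypothetical firmware path:

```go
package main

import (
	"log"

	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
)

func main() {
	// If /lib/firmware/example/fw.bin is absent but fw.bin.zst exists,
	// the removed code returns the .zst path instead of failing.
	files, err := misc.GetFiles([]string{"/lib/firmware/example/fw.bin"}, true)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(files)
}
```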


@@ -1,167 +0,0 @@
// Copyright 2025 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package misc
import (
"os"
"path/filepath"
"reflect"
"sort"
"testing"
"time"
)
func TestGetFile(t *testing.T) {
subtests := []struct {
name string
setup func(tmpDir string) (inputPath string, expectedFiles []string, err error)
required bool
}{
{
name: "symlink to directory - no infinite recursion",
setup: func(tmpDir string) (string, []string, error) {
// Create target directory with files
targetDir := filepath.Join(tmpDir, "target")
if err := os.MkdirAll(targetDir, 0755); err != nil {
return "", nil, err
}
testFile1 := filepath.Join(targetDir, "file1.txt")
testFile2 := filepath.Join(targetDir, "file2.txt")
if err := os.WriteFile(testFile1, []byte("content1"), 0644); err != nil {
return "", nil, err
}
if err := os.WriteFile(testFile2, []byte("content2"), 0644); err != nil {
return "", nil, err
}
// Create symlink pointing to target directory
symlinkPath := filepath.Join(tmpDir, "symlink")
if err := os.Symlink(targetDir, symlinkPath); err != nil {
return "", nil, err
}
expected := []string{symlinkPath, testFile1, testFile2}
return symlinkPath, expected, nil
},
required: true,
},
{
name: "symlink to file - returns both symlink and target",
setup: func(tmpDir string) (string, []string, error) {
// Create target file
targetFile := filepath.Join(tmpDir, "target.txt")
if err := os.WriteFile(targetFile, []byte("content"), 0644); err != nil {
return "", nil, err
}
// Create symlink pointing to target file
symlinkPath := filepath.Join(tmpDir, "symlink.txt")
if err := os.Symlink(targetFile, symlinkPath); err != nil {
return "", nil, err
}
expected := []string{symlinkPath, targetFile}
return symlinkPath, expected, nil
},
required: true,
},
{
name: "regular file",
setup: func(tmpDir string) (string, []string, error) {
regularFile := filepath.Join(tmpDir, "regular.txt")
if err := os.WriteFile(regularFile, []byte("content"), 0644); err != nil {
return "", nil, err
}
expected := []string{regularFile}
return regularFile, expected, nil
},
required: true,
},
{
name: "regular directory",
setup: func(tmpDir string) (string, []string, error) {
// Create directory with files
dirPath := filepath.Join(tmpDir, "testdir")
if err := os.MkdirAll(dirPath, 0755); err != nil {
return "", nil, err
}
file1 := filepath.Join(dirPath, "file1.txt")
file2 := filepath.Join(dirPath, "subdir", "file2.txt")
if err := os.WriteFile(file1, []byte("content1"), 0644); err != nil {
return "", nil, err
}
if err := os.MkdirAll(filepath.Dir(file2), 0755); err != nil {
return "", nil, err
}
if err := os.WriteFile(file2, []byte("content2"), 0644); err != nil {
return "", nil, err
}
expected := []string{file1, file2}
return dirPath, expected, nil
},
required: true,
},
{
name: "zst compressed file fallback",
setup: func(tmpDir string) (string, []string, error) {
// Create a .zst file but NOT the original file
zstFile := filepath.Join(tmpDir, "firmware.bin.zst")
if err := os.WriteFile(zstFile, []byte("compressed content"), 0644); err != nil {
return "", nil, err
}
// Request the original file (without .zst extension)
originalFile := filepath.Join(tmpDir, "firmware.bin")
// Expected: should find and return the .zst version
expected := []string{zstFile}
return originalFile, expected, nil
},
required: true,
},
}
for _, st := range subtests {
t.Run(st.name, func(t *testing.T) {
tmpDir := t.TempDir()
inputPath, expectedFiles, err := st.setup(tmpDir)
if err != nil {
t.Fatalf("setup failed: %v", err)
}
// Add timeout protection for infinite recursion test
done := make(chan struct{})
var files []string
var getFileErr error
go func() {
defer close(done)
files, getFileErr = getFile(inputPath, st.required)
}()
select {
case <-done:
if getFileErr != nil {
t.Fatalf("getFile failed: %v", getFileErr)
}
case <-time.After(5 * time.Second):
t.Fatal("getFile appears to be in infinite recursion (timeout)")
}
// Sort for comparison
sort.Strings(expectedFiles)
sort.Strings(files)
if !reflect.DeepEqual(expectedFiles, files) {
t.Fatalf("expected: %q, got: %q", expectedFiles, files)
}
})
}
}


@@ -10,39 +10,6 @@ import (
 	"golang.org/x/sys/unix"
 )
 
-// Try to guess whether the system has merged dirs under /usr
-func HasMergedUsr() bool {
-	for _, dir := range []string{"/bin", "/lib"} {
-		stat, err := os.Lstat(dir)
-		if err != nil {
-			// TODO: probably because the dir doesn't exist... so
-			// should we assume that it's because the system has some weird
-			// implementation of "merge /usr"?
-			return true
-		} else if stat.Mode()&os.ModeSymlink == 0 {
-			// Not a symlink, so must not be merged /usr
-			return false
-		}
-	}
-
-	return true
-}
-
-// Converts given path to one supported by a merged /usr config.
-// E.g., /bin/foo becomes /usr/bin/foo, /lib/bar becomes /usr/lib/bar
-// See: https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge
-func MergeUsr(file string) string {
-	// Prepend /usr to supported paths
-	for _, prefix := range []string{"/bin", "/sbin", "/lib", "/lib64"} {
-		if strings.HasPrefix(file, prefix) {
-			file = filepath.Join("/usr", file)
-			break
-		}
-	}
-
-	return file
-}
-
 // Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
 // absolute path
 func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {

View File

@@ -1,49 +0,0 @@
// Copyright 2024 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package osutil
import (
"testing"
)
func TestMergeUsr(t *testing.T) {
subtests := []struct {
in string
expected string
}{
{
in: "/bin/foo",
expected: "/usr/bin/foo",
},
{
in: "/sbin/foo",
expected: "/usr/sbin/foo",
},
{
in: "/usr/sbin/foo",
expected: "/usr/sbin/foo",
},
{
in: "/usr/bin/foo",
expected: "/usr/bin/foo",
},
{
in: "/lib/foo.so",
expected: "/usr/lib/foo.so",
},
{
in: "/lib64/foo.so",
expected: "/usr/lib64/foo.so",
},
}
for _, st := range subtests {
t.Run(st.in, func(t *testing.T) {
out := MergeUsr(st.in)
if out != st.expected {
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
}
})
}
}

View File

@@ -4,82 +4,87 @@
 package deviceinfo
 
 import (
-	"context"
+	"bufio"
 	"fmt"
+	"io"
+	"log"
+	"os"
 	"reflect"
-	"strconv"
 	"strings"
-	"time"
-
-	"github.com/mvdan/sh/shell"
-	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
 )
 
 type DeviceInfo struct {
 	InitfsCompression      string
 	InitfsExtraCompression string
+	MesaDriver             string
+	ModulesInitfs          string
 	UbootBoardname         string
-	GenerateSystemdBoot    string
-	FormatVersion          string
-	CreateInitfsExtra      bool
 }
 
-// Reads the relevant entries from "file" into DeviceInfo struct
-// Any already-set entries will be overwriten if they are present
-// in "file"
-func (d *DeviceInfo) ReadDeviceinfo(file string) error {
-	if exists, err := misc.Exists(file); !exists {
-		return fmt.Errorf("%q not found, required by mkinitfs", file)
-	} else if err != nil {
-		return fmt.Errorf("unexpected error getting status for %q: %s", file, err)
-	}
-
-	if err := d.unmarshal(file); err != nil {
-		return err
-	}
-
-	return nil
+func ReadDeviceinfo(file string) (DeviceInfo, error) {
+	var deviceinfo DeviceInfo
+
+	fd, err := os.Open(file)
+	if err != nil {
+		return deviceinfo, err
+	}
+	defer fd.Close()
+
+	if err := unmarshal(fd, &deviceinfo); err != nil {
+		return deviceinfo, err
+	}
+
+	return deviceinfo, nil
 }
 
 // Unmarshals a deviceinfo into a DeviceInfo struct
-func (d *DeviceInfo) unmarshal(file string) error {
-	ctx, cancelCtx := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
-	defer cancelCtx()
-	vars, err := shell.SourceFile(ctx, file)
-	if err != nil {
-		return fmt.Errorf("parsing deviceinfo %q failed: %w", file, err)
-	}
-
-	for k, v := range vars {
-		fieldName := nameToField(k)
-		field := reflect.ValueOf(d).Elem().FieldByName(fieldName)
+func unmarshal(r io.Reader, devinfo *DeviceInfo) error {
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		line := s.Text()
+		if strings.HasPrefix(line, "#") {
+			continue
+		}
+
+		// line isn't setting anything, so just ignore it
+		if !strings.Contains(line, "=") {
+			continue
+		}
+
+		// sometimes line has a comment at the end after setting an option
+		line = strings.SplitN(line, "#", 2)[0]
+		line = strings.TrimSpace(line)
+
+		// must support having '=' in the value (e.g. kernel cmdline)
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) != 2 {
+			return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
+		}
+
+		name, val := parts[0], parts[1]
+		val = strings.ReplaceAll(val, "\"", "")
+
+		if name == "deviceinfo_format_version" && val != "0" {
+			return fmt.Errorf("deviceinfo format version %q is not supported", val)
+		}
+
+		fieldName := nameToField(name)
+		if fieldName == "" {
+			return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
+		}
+
+		field := reflect.ValueOf(devinfo).Elem().FieldByName(fieldName)
 		if !field.IsValid() {
 			// an option that meets the deviceinfo "specification", but isn't
 			// one we care about in this module
 			continue
 		}
-		switch field.Interface().(type) {
-		case string:
-			field.SetString(v.String())
-		case bool:
-			if v, err := strconv.ParseBool(v.String()); err != nil {
-				return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'bool'", file, k)
-			} else {
-				field.SetBool(v)
-			}
-		case int:
-			if v, err := strconv.ParseInt(v.String(), 10, 32); err != nil {
-				return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'int'", file, k)
-			} else {
-				field.SetInt(v)
-			}
-		default:
-			return fmt.Errorf("deviceinfo %q has unsupported type for field %q", file, k)
-		}
+		field.SetString(val)
 	}
 
-	if d.FormatVersion != "0" {
-		return fmt.Errorf("deviceinfo %q has an unsupported format version %q", file, d.FormatVersion)
+	if err := s.Err(); err != nil {
+		log.Print("unable to parse deviceinfo: ", err)
+		return err
 	}
 
 	return nil
@@ -103,25 +108,3 @@ func nameToField(name string) string {
 
 	return field
 }
-
-func (d DeviceInfo) String() string {
-	return fmt.Sprintf(`{
-	%s: %v
-	%s: %v
-	%s: %v
-	%s: %v
-	%s: %v
-	%s: %v
-	%s: %v
-	%s: %v
-}`,
-		"deviceinfo_format_version", d.FormatVersion,
-		"deviceinfo_", d.FormatVersion,
-		"deviceinfo_initfs_compression", d.InitfsCompression,
-		"deviceinfo_initfs_extra_compression", d.InitfsCompression,
-		"deviceinfo_ubootBoardname", d.UbootBoardname,
-		"deviceinfo_generateSystemdBoot", d.GenerateSystemdBoot,
-		"deviceinfo_formatVersion", d.FormatVersion,
-		"deviceinfo_createInitfsExtra", d.CreateInitfsExtra,
-	)
-}
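
Aside: the unmarshal shown above parses deviceinfo line by line instead of sourcing it through a shell. Below is a minimal, self-contained sketch of that line handling; parseLine is a hypothetical name used only for illustration and mirrors the steps in the diff rather than calling the package itself.

package main

import (
	"fmt"
	"strings"
)

// parseLine mirrors unmarshal's per-line handling: skip comment lines and
// lines without "=", drop a trailing comment, split on the first "=", and
// strip double quotes from the value.
func parseLine(line string) (name, val string, ok bool) {
	if strings.HasPrefix(line, "#") || !strings.Contains(line, "=") {
		return "", "", false
	}
	line = strings.TrimSpace(strings.SplitN(line, "#", 2)[0])
	parts := strings.SplitN(line, "=", 2)
	if len(parts) != 2 {
		return "", "", false
	}
	return parts[0], strings.ReplaceAll(parts[1], "\"", ""), true
}

func main() {
	name, val, ok := parseLine(`deviceinfo_mesa_driver="panfrost" # this is a nice driver`)
	fmt.Println(name, val, ok) // deviceinfo_mesa_driver panfrost true
}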

View File

@@ -4,32 +4,12 @@
 package deviceinfo
 
 import (
+	"fmt"
+	"reflect"
 	"strings"
 	"testing"
 )
 
-// Test ReadDeviceinfo and the logic of reading from multiple files
-func TestReadDeviceinfo(t *testing.T) {
-	compression_expected := "gz -9"
-	var devinfo DeviceInfo
-
-	err := devinfo.ReadDeviceinfo("./test_resources/deviceinfo-missing")
-	if !strings.Contains(err.Error(), "required by mkinitfs") {
-		t.Errorf("received an unexpected err: %s", err)
-	}
-
-	err = devinfo.ReadDeviceinfo("./test_resources/deviceinfo-first")
-	if err != nil {
-		t.Errorf("received an unexpected err: %s", err)
-	}
-
-	err = devinfo.ReadDeviceinfo("./test_resources/deviceinfo-msm")
-	if err != nil {
-		t.Errorf("received an unexpected err: %s", err)
-	}
-
-	if devinfo.InitfsCompression != compression_expected {
-		t.Errorf("expected %q, got: %q", compression_expected, devinfo.InitfsCompression)
-	}
-}
-
 // Test conversion of name to DeviceInfo struct field format
 func TestNameToField(t *testing.T) {
 	tables := []struct {
@@ -38,11 +18,10 @@ func TestNameToField(t *testing.T) {
 	}{
 		{"deviceinfo_dtb", "Dtb"},
 		{"dtb", "Dtb"},
-		{"deviceinfo_initfs_compression", "InitfsCompression"},
+		{"deviceinfo_modules_initfs", "ModulesInitfs"},
 		{"modules_initfs", "ModulesInitfs"},
-		{"deviceinfo_initfs_compression___", "InitfsCompression"},
+		{"deviceinfo_modules_initfs___", "ModulesInitfs"},
 		{"deviceinfo_initfs_extra_compression", "InitfsExtraCompression"},
-		{"deviceinfo_create_initfs_extra", "CreateInitfsExtra"},
 	}
 
 	for _, table := range tables {
@@ -58,25 +37,39 @@ func TestUnmarshal(t *testing.T) {
 	tables := []struct {
 		// field is just used for reflection within the test, so it must be a
 		// valid DeviceInfo field
-		file     string
-		expected DeviceInfo
+		field    string
+		in       string
+		expected string
 	}{
-		{"./test_resources/deviceinfo-unmarshal-1", DeviceInfo{
-			FormatVersion:          "0",
-			UbootBoardname:         "foobar-bazz",
-			InitfsCompression:      "zstd:--foo=1 -T0 --bar=bazz",
-			InitfsExtraCompression: "",
-			CreateInitfsExtra:      true,
-		},
-		},
+		{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"\n", "panfrost foo bar bazz"},
+		{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"", "panfrost foo bar bazz"},
+		// line with multiple '='
+		{"InitfsCompression", "deviceinfo_initfs_compression=zstd:--foo=1 -T0 --bar=bazz", "zstd:--foo=1 -T0 --bar=bazz"},
+		// empty option
+		{"ModulesInitfs", "deviceinfo_modules_initfs=\"\"\n", ""},
+		// line with comment at the end
+		{"MesaDriver", "deviceinfo_mesa_driver=\"panfrost\" # this is a nice driver", "panfrost"},
+		{"", "# this is a comment!\n", ""},
+		// empty lines are fine
+		{"", "", ""},
+		// line with whitepace characters only
+		{"", " \t \n\r", ""},
 	}
 
 	var d DeviceInfo
 	for _, table := range tables {
-		if err := d.unmarshal(table.file); err != nil {
-			t.Error(err)
+		testName := fmt.Sprintf("unmarshal::'%s':", strings.ReplaceAll(table.in, "\n", "\\n"))
+		if err := unmarshal(strings.NewReader(table.in), &d); err != nil {
+			t.Errorf("%s received an unexpected err: ", err)
 		}
-		if d != table.expected {
-			t.Errorf("expected: %s, got: %s", table.expected, d)
+
+		// Check against expected value
+		field := reflect.ValueOf(&d).Elem().FieldByName(table.field)
+		out := ""
+		if table.field != "" {
+			out = field.String()
+		}
+		if out != table.expected {
+			t.Errorf("%s expected: %q, got: %q", testName, table.expected, out)
 		}
 	}

View File

@@ -1,3 +0,0 @@
deviceinfo_format_version="0"
deviceinfo_initfs_compression="gz -9"
deviceinfo_mesa_driver="panfrost"

View File

@@ -1,2 +0,0 @@
deviceinfo_format_version="0"
deviceinfo_mesa_driver="msm"

View File

@@ -1,7 +0,0 @@
deviceinfo_format_version="0"
deviceinfo_uboot_boardname="foobar-bazz"
# line with multiple =
deviceinfo_initfs_compression="zstd:--foo=1 -T0 --bar=bazz"
# empty option
deviceinfo_initfs_extra_compression=""
deviceinfo_create_initfs_extra="true" # in-line comment that should be ignored