Compare commits

1 Commit

Author: jane400
SHA1: 20ba9e4131
Date: 2025-02-04 15:07:16 +01:00

misc: also check whether binaries from /{s,}bin are in /usr

This should hopefully avoid failing to build the initramfs
when important binaries like /bin/busybox get moved to /usr.

Also complain loudly when there's a path mismatch, so people notice it.
21 changed files with 246 additions and 656 deletions
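The commit message above describes a fallback when collecting files for the initramfs: if a binary is requested via /bin or /sbin but only exists under /usr, mkinitfs should use the /usr path and warn loudly so the stale path gets fixed. The Go sketch below is a minimal, simplified illustration of that idea; the helper name resolveBinary and the exact log wording are assumptions for this example, not the project's actual API (the real logic lives in the internal/misc getFile path shown further down in the diff).

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// resolveBinary sketches the fallback added by this commit: if a path under
// /bin or /sbin does not exist, also try the /usr-merged equivalent before
// giving up, and complain loudly so people notice the path mismatch.
// (Hypothetical helper for illustration only.)
func resolveBinary(file string) (string, error) {
	if _, err := os.Stat(file); err == nil {
		return file, nil
	}
	if strings.HasPrefix(file, "/bin/") || strings.HasPrefix(file, "/sbin/") {
		fileUsr := filepath.Join("/usr", file) // e.g. /bin/busybox -> /usr/bin/busybox
		if _, err := os.Stat(fileUsr); err == nil {
			log.Printf("failed to find %q, but found it in %q. Please adjust the path.", file, fileUsr)
			return fileUsr, nil
		}
	}
	return "", fmt.Errorf("could not find %q (or a /usr equivalent)", file)
}

func main() {
	if path, err := resolveBinary("/bin/busybox"); err == nil {
		fmt.Println("using", path)
	} else {
		fmt.Println(err)
	}
}
```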


@@ -6,126 +6,43 @@ image: alpine:edge
variables:
GOFLAGS: "-buildvcs=false"
PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/mkinitfs-vendor-${CI_COMMIT_TAG}/${CI_COMMIT_TAG}"
CI_TRON_TEMPLATE_PROJECT: &ci-tron-template-project postmarketOS/ci-common
CI_TRON_JOB_TEMPLATE_PROJECT_URL: $CI_SERVER_URL/$CI_TRON_TEMPLATE_PROJECT
CI_TRON_JOB_TEMPLATE_COMMIT: &ci-tron-template-commit 7c95b5f2d53533e8722abf57c73e558168e811f3
include:
- project: *ci-tron-template-project
ref: *ci-tron-template-commit
file: '/ci-tron/common.yml'
stages:
- lint
- build
- hardware tests
- vendor
- release
workflow:
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH == 'master'
- if: '$CI_COMMIT_TAG != null'
# defaults for "only"
# We need to run the CI jobs in a "merge request specific context", if CI is
# running in a merge request. Otherwise the environment variable that holds the
# merge request ID is not available. This means, we must set the "only"
# variable accordingly - and if we only do it for one job, all other jobs will
# not get executed. So have the defaults here, and use them in all jobs that
# should run on both the master branch, and in merge requests.
# https://docs.gitlab.com/ee/ci/merge_request_pipelines/index.html#excluding-certain-jobs
.only-default: &only-default
only:
- master
- merge_requests
- tags
build:
stage: build
variables:
GOTEST: "gotestsum --junitfile report.xml --format testname -- ./..."
parallel:
matrix:
- TAG: shared
- TAG: arm64
tags:
- $TAG
<<: *only-default
before_script:
- apk -q add go gotestsum staticcheck make scdoc
- apk -q add go staticcheck make scdoc
script:
- make test
- make
after_script:
- mkdir -p rootfs/usr/sbin
- cp mkinitfs rootfs/usr/sbin
artifacts:
expire_in: 1 week
reports:
junit: report.xml
paths:
- rootfs
.qemu-common:
variables:
DEVICE_NAME: qemu-$CPU_ARCH
KERNEL_VARIANT: lts
rules:
- if: '$CI_COMMIT_TAG != null'
when: never
.build-ci-tron-qemu:
stage: hardware tests
extends:
- .pmos-ci-tron-build-boot-artifacts
- .qemu-common
variables:
INSTALL_PACKAGES: device-${DEVICE_NAME} device-${DEVICE_NAME}-kernel-${KERNEL_VARIANT} postmarketos-mkinitfs-hook-ci
build-ci-tron-qemu-amd64:
extends:
- .build-ci-tron-qemu
needs:
- job: "build"
parallel:
matrix:
- TAG: shared
variables:
CPU_ARCH: amd64
build-ci-tron-qemu-aarch64:
extends:
- .build-ci-tron-qemu
needs:
- job: "build"
parallel:
matrix:
- TAG: arm64
variables:
CPU_ARCH: aarch64
.test-ci-tron-qemu:
stage: hardware tests
extends:
- .pmos-ci-tron-initramfs-test
- .qemu-common
dependencies: []
variables:
CI_TRON_KERNEL__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/vmlinuz-${KERNEL_VARIANT}"
CI_TRON_INITRAMFS__INITRAMFS__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/initramfs"
CI_TRON_KERNEL_CMDLINE__DEVICEINFO: 'console=tty1 console=ttyS0,115200 PMOS_FORCE_PARTITION_RESIZE'
test-ci-tron-qemu-amd64:
extends:
- .test-ci-tron-qemu
- .pmos-ci-tron-runner-qemu-amd64
needs:
- job: 'build-ci-tron-qemu-amd64'
artifacts: false
variables:
CPU_ARCH: amd64
test-ci-tron-qemu-aarch64:
extends:
- .test-ci-tron-qemu
- .pmos-ci-tron-runner-qemu-aarch64
needs:
- job: 'build-ci-tron-qemu-aarch64'
artifacts: false
variables:
CPU_ARCH: aarch64
vendor:
stage: vendor
image: alpine:latest
rules:
- if: '$CI_COMMIT_TAG != null'
only:
- tags
before_script:
- apk -q add curl go make
script:
@@ -137,8 +54,8 @@ vendor:
release:
stage: release
image: registry.gitlab.com/gitlab-org/release-cli:latest
rules:
- if: '$CI_COMMIT_TAG != null'
only:
- tags
script:
- |
release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \


@@ -12,13 +12,7 @@ GO?=go
GOFLAGS?=
LDFLAGS+=-s -w -X main.Version=$(VERSION)
RM?=rm -f
GOTESTOPTS?=-count=1 -race
GOTEST?=go test ./...
DISABLE_GOGC?=
ifeq ($(DISABLE_GOGC),1)
LDFLAGS+=-X main.DisableGC=true
endif
GOTEST=go test -count=1 -race
GOSRC!=find * -name '*.go'
GOSRC+=go.mod go.sum
@@ -48,10 +42,10 @@ test:
fi
@staticcheck ./...
$(GOTEST) $(GOTESTOPTS)
@$(GOTEST) ./...
clean:
$(RM) mkinitfs $(DOCS)
$(RM) mkinitfs $(DOCS)
$(RM) $(VENDORED)*
install: $(DOCS) mkinitfs


@@ -9,8 +9,6 @@ import (
"log"
"os"
"path/filepath"
"runtime/debug"
"strings"
"time"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
@@ -28,14 +26,8 @@ import (
// set at build time
var Version string
var DisableGC string
func main() {
// To allow working around silly GC-related issues, like https://gitlab.com/qemu-project/qemu/-/issues/2560
if strings.ToLower(DisableGC) == "true" {
debug.SetGCPercent(-1)
}
retCode := 0
defer func() { os.Exit(retCode) }()
@@ -111,38 +103,15 @@ func main() {
hookfiles.New("/etc/mkinitfs/files"),
hookscripts.New("/usr/share/mkinitfs/hooks", "/hooks"),
hookscripts.New("/etc/mkinitfs/hooks", "/hooks"),
hookscripts.New("/usr/share/mkinitfs/hooks-cleanup", "/hooks-cleanup"),
hookscripts.New("/etc/mkinitfs/hooks-cleanup", "/hooks-cleanup"),
modules.New("/usr/share/mkinitfs/modules"),
modules.New("/etc/mkinitfs/modules"),
})
initfsExtra := initramfs.New([]filelist.FileLister{
hookfiles.New("/usr/share/mkinitfs/files-extra"),
hookfiles.New("/etc/mkinitfs/files-extra"),
hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
modules.New("/usr/share/mkinitfs/modules-extra"),
modules.New("/etc/mkinitfs/modules-extra"),
})
if err := initramfsAr.AddItems(initfs); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs")
retCode = 1
return
}
// Include initramfs-extra files in the initramfs if not making a separate
// archive
if !devinfo.CreateInitfsExtra {
if err := initramfsAr.AddItems(initfsExtra); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs")
retCode = 1
return
}
}
if err := initramfsAr.Write(filepath.Join(workDir, "initramfs"), os.FileMode(0644)); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs")
@@ -151,31 +120,37 @@ func main() {
}
misc.TimeFunc(start, "initramfs")
if devinfo.CreateInitfsExtra {
//
// initramfs-extra
//
// deviceinfo.InitfsExtraCompression needs a little more post-processing
compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
log.Printf("== Generating %s ==\n", "initramfs-extra")
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
//
// initramfs-extra
//
// deviceinfo.InitfsExtraCompression needs a little more post-processing
compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
log.Printf("== Generating %s ==\n", "initramfs-extra")
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
start = time.Now()
initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs-extra")
retCode = 1
return
}
if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs-extra")
retCode = 1
return
}
misc.TimeFunc(start, "initramfs-extra")
start = time.Now()
initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
initfsExtra := initramfs.New([]filelist.FileLister{
hookfiles.New("/usr/share/mkinitfs/files-extra"),
hookfiles.New("/etc/mkinitfs/files-extra"),
hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
modules.New("/usr/share/mkinitfs/modules-extra"),
modules.New("/etc/mkinitfs/modules-extra"),
})
if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs-extra")
retCode = 1
return
}
if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs-extra")
retCode = 1
return
}
misc.TimeFunc(start, "initramfs-extra")
// Final processing of initramfs / kernel is done by boot-deploy
if !disableBootDeploy {


@@ -42,7 +42,6 @@ mkinitfs reads deviceinfo values from */usr/share/deviceinfo/deviceinfo* and
*/etc/deviceinfo*, in that order. The following variables
are *required* by mkinitfs:
- deviceinfo_create_initfs_extra
- deviceinfo_generate_systemd_boot
- deviceinfo_initfs_compression
- deviceinfo_initfs_extra_compression
@@ -134,8 +133,7 @@ create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, an
skipped.
## /usr/share/mkinitfs/hooks, /etc/mkinitfs/hooks
## /usr/share/mkinitfs/hooks-cleanup, /etc/mkinitfs/hooks-cleanup
## /usr/share/mkinitfs/hooks-extra, /etc/mkinitfs/hooks-extra
## /usr/share/mkinitfs/hooks-extra*, /etc/mkinitfs/hooks-extra
Any files listed under these directories are copied as-is into the
relevant archives. Hooks are generally script files, but how they are
@@ -148,7 +146,7 @@ create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, an
## /usr/share/mkinitfs/modules, /etc/mkinitfs/modules
## /usr/share/mkinitfs/modules-extra, /etc/mkinitfs/modules-extra
Files with the *.modules* extension in these directories are lists of
Files with the *.modules* extention in these directories are lists of
kernel modules to include in the initramfs. Individual modules and
directories can be listed in the files here. Globbing is also supported.
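As a concrete illustration of the *.modules* description above, a hypothetical modules list might look like the following (the file name and entries are invented for this example, not taken from the repository; each non-comment line names a kernel module, a directory, or a glob):

```
# /etc/mkinitfs/modules/00-example.modules (hypothetical)
virtio_blk
drivers/usb/host
drivers/gpu/drm/panfrost*
```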

go.mod (10 changed lines)

@@ -7,13 +7,5 @@ require (
github.com/klauspost/compress v1.15.12
github.com/pierrec/lz4/v4 v4.1.17
github.com/ulikunitz/xz v0.5.10
golang.org/x/sys v0.18.0
)
require (
github.com/mvdan/sh v2.6.4+incompatible // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/term v0.18.0 // indirect
mvdan.cc/sh v2.6.4+incompatible // indirect
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
)

go.sum (12 changed lines)

@@ -2,21 +2,9 @@ github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RS
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/mvdan/sh v2.6.4+incompatible h1:D4oEWW0J8cL7zeQkrXw76IAYXF0mJfDaBwjgzmKb6zs=
github.com/mvdan/sh v2.6.4+incompatible/go.mod h1:kipHzrJQZEDCMTNRVRAlMMFjqHEYrthfIlFkJSrmDZE=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
mvdan.cc/sh v2.6.4+incompatible h1:eD6tDeh0pw+/TOTI1BBEryZ02rD2nMcFsgcvde7jffM=
mvdan.cc/sh v2.6.4+incompatible/go.mod h1:IeeQbZq+x2SUGBensq/jge5lLQbS3XT2ktyp3wrt4x8=


@@ -237,10 +237,7 @@ func (archive *Archive) AddItemsExclude(flister filelist.FileLister, exclude fil
// Adds the given file or directory at "source" to the archive at "dest"
func (archive *Archive) AddItem(source string, dest string) error {
if osutil.HasMergedUsr() {
source = osutil.MergeUsr(source)
dest = osutil.MergeUsr(dest)
}
sourceStat, err := os.Lstat(source)
if err != nil {
e, ok := err.(*os.PathError)
@@ -251,12 +248,6 @@ func (archive *Archive) AddItem(source string, dest string) error {
return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
}
// A symlink to a directory doesn't have the os.ModeDir bit set, so we need
// to check if it's a symlink first
if sourceStat.Mode()&os.ModeSymlink != 0 {
return archive.addSymlink(source, dest)
}
if sourceStat.Mode()&os.ModeDir != 0 {
return archive.addDir(dest)
}
@@ -264,45 +255,6 @@ func (archive *Archive) AddItem(source string, dest string) error {
return archive.addFile(source, dest)
}
func (archive *Archive) addSymlink(source string, dest string) error {
target, err := os.Readlink(source)
if err != nil {
log.Print("addSymlink: failed to get symlink target for: ", source)
return err
}
// Make sure we pick up the symlink target too
targetAbs := target
if filepath.Dir(target) == "." {
// relative symlink, make it absolute so we can add the target to the archive
targetAbs = filepath.Join(filepath.Dir(source), target)
}
if !filepath.IsAbs(targetAbs) {
targetAbs, err = osutil.RelativeSymlinkTargetToDir(targetAbs, filepath.Dir(source))
if err != nil {
return err
}
}
archive.AddItem(targetAbs, targetAbs)
// Now add the symlink itself
destFilename := strings.TrimPrefix(dest, "/")
archive.items.add(archiveItem{
sourcePath: source,
header: &cpio.Header{
Name: destFilename,
Linkname: target,
Mode: 0644 | cpio.ModeSymlink,
Size: int64(len(target)),
},
})
return nil
}
func (archive *Archive) addFile(source string, dest string) error {
if err := archive.addDir(filepath.Dir(dest)); err != nil {
return err
@@ -314,6 +266,42 @@ func (archive *Archive) addFile(source string, dest string) error {
return err
}
// Symlink: write symlink to archive then set 'file' to link target
if sourceStat.Mode()&os.ModeSymlink != 0 {
// log.Printf("File %q is a symlink", file)
target, err := os.Readlink(source)
if err != nil {
log.Print("addFile: failed to get symlink target: ", source)
return err
}
destFilename := strings.TrimPrefix(dest, "/")
archive.items.add(archiveItem{
sourcePath: source,
header: &cpio.Header{
Name: destFilename,
Linkname: target,
Mode: 0644 | cpio.ModeSymlink,
Size: int64(len(target)),
// Checksum: 1,
},
})
if filepath.Dir(target) == "." {
target = filepath.Join(filepath.Dir(source), target)
}
// make sure target is an absolute path
if !filepath.IsAbs(target) {
target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
if err != nil {
return err
}
}
err = archive.addFile(target, target)
return err
}
destFilename := strings.TrimPrefix(dest, "/")
archive.items.add(archiveItem{
@@ -416,12 +404,6 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err erro
}
func (archive *Archive) writeCpio() error {
// Just in case
if osutil.HasMergedUsr() {
archive.addSymlink("/bin", "/bin")
archive.addSymlink("/sbin", "/sbin")
archive.addSymlink("/lib", "/lib")
}
// having a transient function for actually adding files to the archive
// allows the deferred fd.close to run after every copy and prevent having
// tons of open file handles until the copying is all done
@@ -436,19 +418,19 @@ func (archive *Archive) writeCpio() error {
if header.Mode.IsRegular() {
fd, err := os.Open(source)
if err != nil {
return fmt.Errorf("archive.writeCpio: Unable to open file %q, %w", source, err)
return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
}
defer fd.Close()
if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
return fmt.Errorf("archive.writeCpio: Couldn't process %q: %w", source, err)
return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
}
} else if header.Linkname != "" {
// the contents of a symlink is just need the link name
if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %q -> %q: %w", source, header.Linkname, err)
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
}
} else {
return fmt.Errorf("archive.writeCpio: unknown type for file: %q: %d", source, header.Mode)
return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
}
}


@@ -78,17 +78,12 @@ func (b *BootDeploy) Run() error {
}
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
args := []string{
cmd := exec.Command("boot-deploy",
"-i", "initramfs",
"-k", kernFilename,
"-d", b.inDir,
"-o", b.outDir,
}
if b.devinfo.CreateInitfsExtra {
args = append(args, "initramfs-extra")
}
cmd := exec.Command("boot-deploy", args...)
"initramfs-extra")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr


@@ -44,7 +44,7 @@ func (h *HookDirs) List() (*filelist.FileList, error) {
s := bufio.NewScanner(f)
for s.Scan() {
dir := strings.TrimSpace(s.Text())
dir := s.Text()
if len(dir) == 0 || strings.HasPrefix(dir, "#") {
continue
}


@@ -11,7 +11,6 @@ import (
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
)
type HookFiles struct {
@@ -59,15 +58,12 @@ func slurpFiles(fd io.Reader) (*filelist.FileList, error) {
s := bufio.NewScanner(fd)
for s.Scan() {
line := strings.TrimSpace(s.Text())
line := s.Text()
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
src, dest, has_dest := strings.Cut(line, ":")
if osutil.HasMergedUsr() {
src = osutil.MergeUsr(src)
}
fFiles, err := misc.GetFiles([]string{src}, true)
if err != nil {


@@ -33,14 +33,8 @@ func (m *Modules) List() (*filelist.FileList, error) {
}
files := filelist.NewFileList()
libDir := "/usr/lib/modules"
if exists, err := misc.Exists(libDir); !exists {
libDir = "/lib/modules"
} else if err != nil {
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", libDir, err)
}
modDir := filepath.Join(libDir, kernVer)
modDir := filepath.Join("/lib/modules", kernVer)
if exists, err := misc.Exists(modDir); !exists {
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
@@ -83,7 +77,7 @@ func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
files := filelist.NewFileList()
s := bufio.NewScanner(fd)
for s.Scan() {
line := strings.TrimSpace(s.Text())
line := s.Text()
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}
@@ -103,8 +97,8 @@ func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
}
} else if dir == "" {
// item is a module name
if modFilelist, err := getModule(line, modDir); err != nil {
return nil, fmt.Errorf("unable to get module file %q: %w", line, err)
if modFilelist, err := getModule(s.Text(), modDir); err != nil {
return nil, fmt.Errorf("unable to get module file %q: %w", s.Text(), err)
} else {
for _, file := range modFilelist {
files.Add(file, file)
@@ -188,7 +182,7 @@ func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
s := bufio.NewScanner(modulesDep)
for s.Scan() {
line := strings.TrimSpace(s.Text())
line := s.Text()
if len(line) == 0 || strings.HasPrefix(line, "#") {
continue
}


@@ -18,7 +18,6 @@ func TestStripExts(t *testing.T) {
{"another_file", "another_file"},
{"a.b.c.d.e.f.g.h.i", "a"},
{"virtio_blk.ko", "virtio_blk"},
{"virtio_blk.ko ", "virtio_blk"},
}
for _, table := range tables {
out := stripExts(table.in)


@@ -3,9 +3,10 @@ package misc
import (
"debug/elf"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"log"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
)
@@ -23,57 +24,51 @@ func GetFiles(list []string, required bool) (files []string, err error) {
return
}
func getFile(file string, required bool) (files []string, err error) {
// Expand glob expression
expanded, err := filepath.Glob(file)
if err != nil {
return
}
if len(expanded) > 0 && expanded[0] != file {
for _, path := range expanded {
if globFiles, err := getFile(path, required); err != nil {
return files, err
} else {
files = append(files, globFiles...)
}
}
return RemoveDuplicates(files), nil
}
// If the file is a symlink we need to do this to prevent an infinite recursion
// loop:
// Symlinks need special handling to prevent infinite recursion:
// 1) add the symlink to the list of files
// 2) set file to dereferenced target
// 4) continue this function to either walk it if the target is a dir or add the
// target to the list of files
if s, err := os.Lstat(file); err == nil {
if s.Mode()&fs.ModeSymlink != 0 {
files = append(files, file)
if target, err := filepath.EvalSymlinks(file); err != nil {
return files, err
} else {
file = target
}
}
}
// This function doesn't handle globs, use getFile() instead.
func getFileNormalized(file string, required bool) (files []string, err error) {
fileInfo, err := os.Stat(file)
// Trying some fallbacks...
if err != nil {
// Check if there is a Zstd-compressed version of the file
fileZstd := file + ".zst" // .zst is the extension used by linux-firmware
fileInfoZstd, errZstd := os.Stat(fileZstd)
type triedResult struct {
file string
err error
}
if errZstd == nil {
file = fileZstd
fileInfo = fileInfoZstd
// Unset nil so we don't retain the error from the os.Stat call for the uncompressed version.
err = nil
} else {
if required {
return files, fmt.Errorf("getFile: failed to stat file %q: %w (also tried %q: %w)", file, err, fileZstd, errZstd)
triedFiles := make([]triedResult, 0, 1)
// Temporary fallback until alpine/pmOS usr-merge happened
// If a path starts with /bin or /sbin, also try /usr equivalent before giving up
if strings.HasPrefix(file, "/bin/") || strings.HasPrefix(file, "/sbin/") {
fileUsr := filepath.Join("/usr", file)
_, err := os.Stat(fileUsr);
if err == nil {
log.Printf("getFile: failed to find %q, but found it in %q. Please adjust the path.", file, fileUsr)
return getFileNormalized(fileUsr, required)
} else {
triedFiles = append(triedFiles, triedResult{fileUsr, err})
}
}
{
// Check if there is a Zstd-compressed version of the file
fileZstd := file + ".zst" // .zst is the extension used by linux-firmware
_, err := os.Stat(fileZstd);
if err == nil {
return getFileNormalized(fileZstd, required)
} else {
triedFiles = append(triedFiles, triedResult{fileZstd, err})
}
}
// Failed to find anything
if required {
failStrings := make([]string, 0, 2)
for _, result := range triedFiles {
failStrings = append(failStrings, fmt.Sprintf("\n - also tried %q: %v", result.file, result.err))
}
return files, fmt.Errorf("getFile: failed to stat file %q: %v%q", file, err, strings.Join(failStrings, ""))
} else {
return files, nil
}
}
@@ -114,6 +109,26 @@ func getFile(file string, required bool) (files []string, err error) {
return
}
func getFile(file string, required bool) (files []string, err error) {
// Expand glob expression
expanded, err := filepath.Glob(file)
if err != nil {
return
}
if len(expanded) > 0 && expanded[0] != file {
for _, path := range expanded {
if globFiles, err := getFile(path, required); err != nil {
return files, err
} else {
files = append(files, globFiles...)
}
}
return RemoveDuplicates(files), nil
}
return getFileNormalized(file, required)
}
func getDeps(file string, parents map[string]struct{}) (files []string, err error) {
if _, found := parents[file]; found {
@@ -139,7 +154,6 @@ func getDeps(file string, parents map[string]struct{}) (files []string, err erro
"/usr/lib",
"/lib",
"/usr/lib/expect*",
"/usr/lib/systemd",
}
for _, lib := range libs {


@@ -1,167 +0,0 @@
// Copyright 2025 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package misc
import (
"os"
"path/filepath"
"reflect"
"sort"
"testing"
"time"
)
func TestGetFile(t *testing.T) {
subtests := []struct {
name string
setup func(tmpDir string) (inputPath string, expectedFiles []string, err error)
required bool
}{
{
name: "symlink to directory - no infinite recursion",
setup: func(tmpDir string) (string, []string, error) {
// Create target directory with files
targetDir := filepath.Join(tmpDir, "target")
if err := os.MkdirAll(targetDir, 0755); err != nil {
return "", nil, err
}
testFile1 := filepath.Join(targetDir, "file1.txt")
testFile2 := filepath.Join(targetDir, "file2.txt")
if err := os.WriteFile(testFile1, []byte("content1"), 0644); err != nil {
return "", nil, err
}
if err := os.WriteFile(testFile2, []byte("content2"), 0644); err != nil {
return "", nil, err
}
// Create symlink pointing to target directory
symlinkPath := filepath.Join(tmpDir, "symlink")
if err := os.Symlink(targetDir, symlinkPath); err != nil {
return "", nil, err
}
expected := []string{symlinkPath, testFile1, testFile2}
return symlinkPath, expected, nil
},
required: true,
},
{
name: "symlink to file - returns both symlink and target",
setup: func(tmpDir string) (string, []string, error) {
// Create target file
targetFile := filepath.Join(tmpDir, "target.txt")
if err := os.WriteFile(targetFile, []byte("content"), 0644); err != nil {
return "", nil, err
}
// Create symlink pointing to target file
symlinkPath := filepath.Join(tmpDir, "symlink.txt")
if err := os.Symlink(targetFile, symlinkPath); err != nil {
return "", nil, err
}
expected := []string{symlinkPath, targetFile}
return symlinkPath, expected, nil
},
required: true,
},
{
name: "regular file",
setup: func(tmpDir string) (string, []string, error) {
regularFile := filepath.Join(tmpDir, "regular.txt")
if err := os.WriteFile(regularFile, []byte("content"), 0644); err != nil {
return "", nil, err
}
expected := []string{regularFile}
return regularFile, expected, nil
},
required: true,
},
{
name: "regular directory",
setup: func(tmpDir string) (string, []string, error) {
// Create directory with files
dirPath := filepath.Join(tmpDir, "testdir")
if err := os.MkdirAll(dirPath, 0755); err != nil {
return "", nil, err
}
file1 := filepath.Join(dirPath, "file1.txt")
file2 := filepath.Join(dirPath, "subdir", "file2.txt")
if err := os.WriteFile(file1, []byte("content1"), 0644); err != nil {
return "", nil, err
}
if err := os.MkdirAll(filepath.Dir(file2), 0755); err != nil {
return "", nil, err
}
if err := os.WriteFile(file2, []byte("content2"), 0644); err != nil {
return "", nil, err
}
expected := []string{file1, file2}
return dirPath, expected, nil
},
required: true,
},
{
name: "zst compressed file fallback",
setup: func(tmpDir string) (string, []string, error) {
// Create a .zst file but NOT the original file
zstFile := filepath.Join(tmpDir, "firmware.bin.zst")
if err := os.WriteFile(zstFile, []byte("compressed content"), 0644); err != nil {
return "", nil, err
}
// Request the original file (without .zst extension)
originalFile := filepath.Join(tmpDir, "firmware.bin")
// Expected: should find and return the .zst version
expected := []string{zstFile}
return originalFile, expected, nil
},
required: true,
},
}
for _, st := range subtests {
t.Run(st.name, func(t *testing.T) {
tmpDir := t.TempDir()
inputPath, expectedFiles, err := st.setup(tmpDir)
if err != nil {
t.Fatalf("setup failed: %v", err)
}
// Add timeout protection for infinite recursion test
done := make(chan struct{})
var files []string
var getFileErr error
go func() {
defer close(done)
files, getFileErr = getFile(inputPath, st.required)
}()
select {
case <-done:
if getFileErr != nil {
t.Fatalf("getFile failed: %v", getFileErr)
}
case <-time.After(5 * time.Second):
t.Fatal("getFile appears to be in infinite recursion (timeout)")
}
// Sort for comparison
sort.Strings(expectedFiles)
sort.Strings(files)
if !reflect.DeepEqual(expectedFiles, files) {
t.Fatalf("expected: %q, got: %q", expectedFiles, files)
}
})
}
}


@@ -10,39 +10,6 @@ import (
"golang.org/x/sys/unix"
)
// Try to guess whether the system has merged dirs under /usr
func HasMergedUsr() bool {
for _, dir := range []string{"/bin", "/lib"} {
stat, err := os.Lstat(dir)
if err != nil {
// TODO: probably because the dir doesn't exist... so
// should we assume that it's because the system has some weird
// implementation of "merge /usr"?
return true
} else if stat.Mode()&os.ModeSymlink == 0 {
// Not a symlink, so must not be merged /usr
return false
}
}
return true
}
// Converts given path to one supported by a merged /usr config.
// E.g., /bin/foo becomes /usr/bin/foo, /lib/bar becomes /usr/lib/bar
// See: https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge
func MergeUsr(file string) string {
// Prepend /usr to supported paths
for _, prefix := range []string{"/bin", "/sbin", "/lib", "/lib64"} {
if strings.HasPrefix(file, prefix) {
file = filepath.Join("/usr", file)
break
}
}
return file
}
// Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
// absolute path
func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {


@@ -1,49 +0,0 @@
// Copyright 2024 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package osutil
import (
"testing"
)
func TestMergeUsr(t *testing.T) {
subtests := []struct {
in string
expected string
}{
{
in: "/bin/foo",
expected: "/usr/bin/foo",
},
{
in: "/sbin/foo",
expected: "/usr/sbin/foo",
},
{
in: "/usr/sbin/foo",
expected: "/usr/sbin/foo",
},
{
in: "/usr/bin/foo",
expected: "/usr/bin/foo",
},
{
in: "/lib/foo.so",
expected: "/usr/lib/foo.so",
},
{
in: "/lib64/foo.so",
expected: "/usr/lib64/foo.so",
},
}
for _, st := range subtests {
t.Run(st.in, func(t *testing.T) {
out := MergeUsr(st.in)
if out != st.expected {
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
}
})
}
}


@@ -4,14 +4,14 @@
package deviceinfo
import (
"context"
"bufio"
"fmt"
"io"
"log"
"os"
"reflect"
"strconv"
"strings"
"time"
"github.com/mvdan/sh/shell"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
)
@@ -20,8 +20,6 @@ type DeviceInfo struct {
InitfsExtraCompression string
UbootBoardname string
GenerateSystemdBoot string
FormatVersion string
CreateInitfsExtra bool
}
// Reads the relevant entries from "file" into DeviceInfo struct
@@ -34,7 +32,13 @@ func (d *DeviceInfo) ReadDeviceinfo(file string) error {
return fmt.Errorf("unexpected error getting status for %q: %s", file, err)
}
if err := d.unmarshal(file); err != nil {
fd, err := os.Open(file)
if err != nil {
return err
}
defer fd.Close()
if err := d.unmarshal(fd); err != nil {
return err
}
@@ -42,44 +46,53 @@ func (d *DeviceInfo) ReadDeviceinfo(file string) error {
}
// Unmarshals a deviceinfo into a DeviceInfo struct
func (d *DeviceInfo) unmarshal(file string) error {
ctx, cancelCtx := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
defer cancelCtx()
vars, err := shell.SourceFile(ctx, file)
if err != nil {
return fmt.Errorf("parsing deviceinfo %q failed: %w", file, err)
}
func (d *DeviceInfo) unmarshal(r io.Reader) error {
s := bufio.NewScanner(r)
for s.Scan() {
line := s.Text()
if strings.HasPrefix(line, "#") {
continue
}
// line isn't setting anything, so just ignore it
if !strings.Contains(line, "=") {
continue
}
// sometimes line has a comment at the end after setting an option
line = strings.SplitN(line, "#", 2)[0]
line = strings.TrimSpace(line)
// must support having '=' in the value (e.g. kernel cmdline)
parts := strings.SplitN(line, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
}
name, val := parts[0], parts[1]
val = strings.ReplaceAll(val, "\"", "")
if name == "deviceinfo_format_version" && val != "0" {
return fmt.Errorf("deviceinfo format version %q is not supported", val)
}
fieldName := nameToField(name)
if fieldName == "" {
return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
}
for k, v := range vars {
fieldName := nameToField(k)
field := reflect.ValueOf(d).Elem().FieldByName(fieldName)
if !field.IsValid() {
// an option that meets the deviceinfo "specification", but isn't
// one we care about in this module
continue
}
switch field.Interface().(type) {
case string:
field.SetString(v.String())
case bool:
if v, err := strconv.ParseBool(v.String()); err != nil {
return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'bool'", file, k)
} else {
field.SetBool(v)
}
case int:
if v, err := strconv.ParseInt(v.String(), 10, 32); err != nil {
return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'int'", file, k)
} else {
field.SetInt(v)
}
default:
return fmt.Errorf("deviceinfo %q has unsupported type for field %q", file, k)
}
field.SetString(val)
}
if d.FormatVersion != "0" {
return fmt.Errorf("deviceinfo %q has an unsupported format version %q", file, d.FormatVersion)
if err := s.Err(); err != nil {
log.Print("unable to parse deviceinfo: ", err)
return err
}
return nil
@@ -103,25 +116,3 @@ func nameToField(name string) string {
return field
}
func (d DeviceInfo) String() string {
return fmt.Sprintf(`{
%s: %v
%s: %v
%s: %v
%s: %v
%s: %v
%s: %v
%s: %v
%s: %v
}`,
"deviceinfo_format_version", d.FormatVersion,
"deviceinfo_", d.FormatVersion,
"deviceinfo_initfs_compression", d.InitfsCompression,
"deviceinfo_initfs_extra_compression", d.InitfsCompression,
"deviceinfo_ubootBoardname", d.UbootBoardname,
"deviceinfo_generateSystemdBoot", d.GenerateSystemdBoot,
"deviceinfo_formatVersion", d.FormatVersion,
"deviceinfo_createInitfsExtra", d.CreateInitfsExtra,
)
}


@@ -4,6 +4,8 @@
package deviceinfo
import (
"fmt"
"reflect"
"strings"
"testing"
)
@@ -42,7 +44,6 @@ func TestNameToField(t *testing.T) {
{"modules_initfs", "ModulesInitfs"},
{"deviceinfo_initfs_compression___", "InitfsCompression"},
{"deviceinfo_initfs_extra_compression", "InitfsExtraCompression"},
{"deviceinfo_create_initfs_extra", "CreateInitfsExtra"},
}
for _, table := range tables {
@@ -58,25 +59,37 @@ func TestUnmarshal(t *testing.T) {
tables := []struct {
// field is just used for reflection within the test, so it must be a
// valid DeviceInfo field
file string
expected DeviceInfo
field string
in string
expected string
}{
{"./test_resources/deviceinfo-unmarshal-1", DeviceInfo{
FormatVersion: "0",
UbootBoardname: "foobar-bazz",
InitfsCompression: "zstd:--foo=1 -T0 --bar=bazz",
InitfsExtraCompression: "",
CreateInitfsExtra: true,
},
},
{"InitfsCompression", "deviceinfo_initfs_compression=\"gzip:-9\"\n", "gzip:-9"},
// line with multiple '='
{"InitfsCompression", "deviceinfo_initfs_compression=zstd:--foo=1 -T0 --bar=bazz", "zstd:--foo=1 -T0 --bar=bazz"},
// empty option
{"InitfsCompression", "deviceinfo_initfs_compression=\"\"\n", ""},
// line with comment at the end
{"", "# this is a comment!\n", ""},
// empty lines are fine
{"", "", ""},
// line with whitepace characters only
{"", " \t \n\r", ""},
}
var d DeviceInfo
for _, table := range tables {
if err := d.unmarshal(table.file); err != nil {
t.Error(err)
testName := fmt.Sprintf("unmarshal::'%s':", strings.ReplaceAll(table.in, "\n", "\\n"))
if err := d.unmarshal(strings.NewReader(table.in)); err != nil {
t.Errorf("%s received an unexpected err: ", err)
}
if d != table.expected {
t.Errorf("expected: %s, got: %s", table.expected, d)
// Check against expected value
field := reflect.ValueOf(&d).Elem().FieldByName(table.field)
out := ""
if table.field != "" {
out = field.String()
}
if out != table.expected {
t.Errorf("%s expected: %q, got: %q", testName, table.expected, out)
}
}


@@ -1,3 +1,2 @@
deviceinfo_format_version="0"
deviceinfo_initfs_compression="gz -9"
deviceinfo_mesa_driver="panfrost"


@@ -1,2 +1 @@
deviceinfo_format_version="0"
deviceinfo_mesa_driver="msm"
deviceinfo_mesa_driver="msm"


@@ -1,7 +0,0 @@
deviceinfo_format_version="0"
deviceinfo_uboot_boardname="foobar-bazz"
# line with multiple =
deviceinfo_initfs_compression="zstd:--foo=1 -T0 --bar=bazz"
# empty option
deviceinfo_initfs_extra_compression=""
deviceinfo_create_initfs_extra="true" # in-line comment that should be ignored