2 Commits
1.5 ... 1.1.1

Author: Minecrell · SHA1: d62180f9d6 · Date: 2021-09-20 15:16:38 -07:00
getModuleDeps: replace Split() loop with ReplaceAllString() (MR 12)

This should do the same as far as I can tell :)

(cherry picked from commit 866f17b86f)
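
The diff for this commit is not included on this page, so the sketch below is only an illustration of the general idea, with assumed helper names and an assumed pattern format: a loop that repeatedly calls strings.Split() to break a module name on '-' and '_' and re-joins the pieces with the character class `[-_]` can be collapsed into a single ReplaceAllString() call that substitutes each separator with that class, producing the same pattern.

```
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Both helpers build a regex fragment in which '-' and '_' are treated as
// interchangeable, e.g. "snd-soc_msm" becomes "snd[-_]soc[-_]msm".
// (Hypothetical names, for illustration only.)

// Loop-based variant: split the name on each separator, then re-join.
func patternWithSplit(modName string) string {
	parts := []string{modName}
	for _, sep := range []string{"-", "_"} {
		var next []string
		for _, p := range parts {
			next = append(next, strings.Split(p, sep)...)
		}
		parts = next
	}
	return strings.Join(parts, "[-_]")
}

// Single-call variant: replace every separator with the character class.
func patternWithReplace(modName string) string {
	return regexp.MustCompile("[-_]").ReplaceAllString(modName, "[-_]")
}

func main() {
	name := "snd-soc_msm"
	fmt.Println(patternWithSplit(name))   // snd[-_]soc[-_]msm
	fmt.Println(patternWithReplace(name)) // snd[-_]soc[-_]msm
}
```

For names like "snd-soc_msm" both variants yield the same pattern, which matches the commit's claim that the two approaches behave the same.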
Author: Minecrell · SHA1: b7f02ff970 · Date: 2021-09-20 15:16:33 -07:00
getModulesDep: disallow regex submatches (MR 12)

At the moment modules in modules.dep are matched even on a submatch,
e.g. looking up "msm" ends up matching "snd-soc-msm8916-digital.ko"
instead of "msm.ko". To fix this, disallow submatches using ^ and $.

(cherry picked from commit 15e99c3658)
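
A minimal, self-contained illustration of the anchoring fix described in the commit message (the pattern below is an assumption for demonstration; the real lookup in getModuleDeps builds its pattern from the module name and the modules.dep line format): without ^ and $, a submatch anywhere in a candidate is enough, so "msm" also hits "snd-soc-msm8916-digital.ko"; with the anchors, only the exact module file name matches.

```
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical module file names as they could appear in modules.dep.
	candidates := []string{"msm.ko", "snd-soc-msm8916-digital.ko"}

	// Unanchored: MatchString() is satisfied by a submatch anywhere in the
	// candidate, so looking up "msm" also "matches" the longer name.
	loose := regexp.MustCompile(`msm`)

	// Anchored with ^ and $: the whole candidate must be exactly "msm.ko".
	strict := regexp.MustCompile(`^msm\.ko$`)

	for _, c := range candidates {
		fmt.Printf("%s: loose=%v strict=%v\n", c, loose.MatchString(c), strict.MatchString(c))
	}
	// Output:
	// msm.ko: loose=true strict=true
	// snd-soc-msm8916-digital.ko: loose=true strict=false
}
```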
10 changed files with 287 additions and 961 deletions

View File

@@ -3,9 +3,6 @@
# global settings # global settings
image: alpine:edge image: alpine:edge
variables:
GOFLAGS: "-buildvcs=false"
stages: stages:
- lint - lint
- build - build
@@ -24,13 +21,25 @@ stages:
- merge_requests - merge_requests
- tags - tags
# device documentation
gofmt linting:
stage: lint
allow_failure: true
<<: *only-default
before_script:
# specific mirror used because staticcheck hasn't made it to the other mirrors yet...
- apk -q update --repository http://dl-4.alpinelinux.org/alpine/edge/testing
- apk -q add --repository http://dl-4.alpinelinux.org/alpine/edge/testing go staticcheck
script:
- .gitlab-ci/check_linting.sh
build: build:
stage: build stage: build
<<: *only-default <<: *only-default
before_script: before_script:
- apk -q add go staticcheck make - apk -q add go
script: script:
- make test - go build -v
- make - go test ./...
artifacts: artifacts:
expire_in: 1 week expire_in: 1 week

.gitlab-ci/check_linting.sh (executable file, 13 lines added)
View File

@@ -0,0 +1,13 @@
#!/bin/sh
echo "### Running gofmt..."
files="$(gofmt -l .)"
if [ ! -z "$files" ]; then
# run gofmt to print out the diff of what needs to be changed
gofmt -d -e .
exit 1
fi
echo "### Running staticcheck..."
staticcheck ./...

View File

@@ -1,54 +0,0 @@
.POSIX:
.SUFFIXES:
PREFIX?=/usr/local
BINDIR?=$(PREFIX)/sbin
SHAREDIR?=$(PREFIX)/share
GO?=go
GOFLAGS?=
LDFLAGS+=-s -w
RM?=rm -f
GOTEST=go test -count=1 -race
GOSRC!=find * -name '*.go'
GOSRC+=go.mod go.sum
all: postmarketos-mkinitfs
postmarketos-mkinitfs: $(GOSRC)
$(GO) build $(GOFLAGS) -ldflags "$(LDFLAGS)" -o postmarketos-mkinitfs
.PHONY: fmt
fmt:
gofmt -w .
test:
@if [ `gofmt -l . | wc -l` -ne 0 ]; then \
gofmt -d .; \
echo "ERROR: source files need reformatting with gofmt"; \
exit 1; \
fi
@staticcheck ./...
@$(GOTEST) ./...
clean:
$(RM) postmarketos-mkinitfs
install: $(DOCS) postmarketos-mkinitfs
install -Dm755 postmarketos-mkinitfs -t $(DESTDIR)$(BINDIR)/
ln -sf postmarketos-mkinitfs $(DESTDIR)$(BINDIR)/mkinitfs
.PHONY: checkinstall
checkinstall:
test -e $(DESTDIR)$(BINDIR)/postmarketos-mkinitfs
test -L $(DESTDIR)$(BINDIR)/mkinitfs
RMDIR_IF_EMPTY:=sh -c '! [ -d $$0 ] || ls -1qA $$0 | grep -q . || rmdir $$0'
uninstall:
$(RM) $(DESTDIR)$(BINDIR)/postmarketos-mkinitfs
$(RM) $(DESTDIR)$(BINDIR)/mkinitfs
${RMDIR_IF_EMPTY} $(DESTDIR)$(BINDIR)
.PHONY: all clean install uninstall test

View File

@@ -1,45 +0,0 @@
`postmarketos-mkinitfs` is a tool for generating an initramfs (and installing
it) on postmarketOS.
## Building
Building this project requires a Go compiler/toolchain and `make`:
```
$ make
```
To install locally:
```
$ make install
```
Installation prefix can be set in the generally accepted way with setting
`PREFIX`:
```
$ make PREFIX=/some/location
# make PREFIX=/some/location install
```
Other paths can be modified from the command line as well, see the top section of
the `Makefile` for more information.
Tests (functional and linting) can be executed by using the `test` make target:
```
$ make test
```
## Usage
The application uses configuration from `/etc/deviceinfo`, and does not support
any other options at runtime. It can be run simply by executing:
```
$ postmarketos-mkinitfs
```
For historical reasons, a symlink from `mkinitfs` to `postmarketos-mkinitfs` is
also installed by the makefile's `install` target.

main.go (504 changed lines)
View File

@@ -1,4 +1,4 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net> // Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later // SPDX-License-Identifier: GPL-3.0-or-later
package main package main
@@ -10,6 +10,7 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"os" "os"
"os/exec" "os/exec"
@@ -53,7 +54,7 @@ func main() {
} }
// temporary working dir // temporary working dir
workDir, err := os.MkdirTemp("", "mkinitfs") workDir, err := ioutil.TempDir("", "mkinitfs")
if err != nil { if err != nil {
log.Fatal("Unable to create temporary work directory:", err) log.Fatal("Unable to create temporary work directory:", err)
} }
@@ -70,14 +71,6 @@ func main() {
log.Fatal("generateInitfsExtra: ", err) log.Fatal("generateInitfsExtra: ", err)
} }
if err := copyUbootFiles(workDir, devinfo); errors.Is(err, os.ErrNotExist) {
log.Println("u-boot files copying skipped: ", err)
} else {
if err != nil {
log.Fatal("copyUbootFiles: ", err)
}
}
// Final processing of initramfs / kernel is done by boot-deploy // Final processing of initramfs / kernel is done by boot-deploy
if err := bootDeploy(workDir, *outDir); err != nil { if err := bootDeploy(workDir, *outDir); err != nil {
log.Fatal("bootDeploy: ", err) log.Fatal("bootDeploy: ", err)
@@ -93,29 +86,18 @@ func bootDeploy(workDir string, outDir string) error {
if len(kernels) == 0 { if len(kernels) == 0 {
return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*")) return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
} }
kernFile, err := os.Open(kernels[0])
// Pick a kernel that does not have suffixes added by boot-deploy
var kernFile string
for _, f := range kernels {
if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
continue
}
kernFile = f
break
}
kernFd, err := os.Open(kernFile)
if err != nil { if err != nil {
return err return err
} }
defer kernFd.Close() defer kernFile.Close()
kernFileCopy, err := os.Create(filepath.Join(workDir, "vmlinuz")) kernFileCopy, err := os.Create(filepath.Join(workDir, "vmlinuz"))
if err != nil { if err != nil {
return err return err
} }
if _, err = io.Copy(kernFileCopy, kernFd); err != nil { if _, err = io.Copy(kernFileCopy, kernFile); err != nil {
return err return err
} }
kernFileCopy.Close() kernFileCopy.Close()
@@ -148,72 +130,72 @@ func exists(file string) bool {
return false return false
} }
func getHookFiles(filesdir string) (files []string, err error) { func getHookFiles(filesdir string) misc.StringSet {
fileInfo, err := os.ReadDir(filesdir) fileInfo, err := ioutil.ReadDir(filesdir)
if err != nil { if err != nil {
return nil, err log.Fatal(err)
} }
files := make(misc.StringSet)
for _, file := range fileInfo { for _, file := range fileInfo {
path := filepath.Join(filesdir, file.Name()) path := filepath.Join(filesdir, file.Name())
f, err := os.Open(path) f, err := os.Open(path)
if err != nil { if err != nil {
return nil, err log.Fatal(err)
} }
defer f.Close() defer f.Close()
s := bufio.NewScanner(f) s := bufio.NewScanner(f)
for s.Scan() { for s.Scan() {
if filelist, err := getFiles([]string{s.Text()}, true); err != nil { if !exists(s.Text()) {
return nil, fmt.Errorf("unable to find file %q required by %q", s.Text(), path) log.Fatalf("Unable to find file %q required by %q", s.Text(), path)
} else {
files = append(files, filelist...)
} }
files[s.Text()] = false
} }
if err := s.Err(); err != nil { if err := s.Err(); err != nil {
return nil, err log.Fatal(err)
} }
} }
return files, nil return files
} }
// Recursively list all dependencies for a given ELF binary // Recursively list all dependencies for a given ELF binary
func getBinaryDeps(file string) (files []string, err error) { func getBinaryDeps(files misc.StringSet, file string) error {
// if file is a symlink, resolve dependencies for target // if file is a symlink, resolve dependencies for target
fileStat, err := os.Lstat(file) fileStat, err := os.Lstat(file)
if err != nil { if err != nil {
return nil, fmt.Errorf("getBinaryDeps: failed to stat file %q: %w", file, err) log.Print("getBinaryDeps: failed to stat file")
return err
} }
// Symlink: write symlink to archive then set 'file' to link target // Symlink: write symlink to archive then set 'file' to link target
if fileStat.Mode()&os.ModeSymlink != 0 { if fileStat.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(file) target, err := os.Readlink(file)
if err != nil { if err != nil {
return nil, fmt.Errorf("getBinaryDeps: unable to read symlink %q: %w", file, err) log.Print("getBinaryDeps: unable to read symlink: ", file)
return err
} }
if !filepath.IsAbs(target) { if !filepath.IsAbs(target) {
target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file)) target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
if err != nil { if err != nil {
return files, err return err
} }
} }
binaryDepFiles, err := getBinaryDeps(target) if err := getBinaryDeps(files, target); err != nil {
if err != nil { return err
return files, err
} }
files = append(files, binaryDepFiles...) return err
return files, err
} }
// get dependencies for binaries // get dependencies for binaries
fd, err := elf.Open(file) fd, err := elf.Open(file)
if err != nil { if err != nil {
return nil, fmt.Errorf("getBinaryDeps: unable to open elf binary %q: %w", file, err) log.Fatal(err)
} }
libs, _ := fd.ImportedLibraries() libs, _ := fd.ImportedLibraries()
fd.Close() fd.Close()
files = append(files, file) files[file] = false
if len(libs) == 0 { if len(libs) == 0 {
return files, err return err
} }
libdirs := []string{"/usr/lib", "/lib"} libdirs := []string{"/usr/lib", "/lib"}
@@ -222,96 +204,55 @@ func getBinaryDeps(file string) (files []string, err error) {
for _, libdir := range libdirs { for _, libdir := range libdirs {
path := filepath.Join(libdir, lib) path := filepath.Join(libdir, lib)
if _, err := os.Stat(path); err == nil { if _, err := os.Stat(path); err == nil {
binaryDepFiles, err := getBinaryDeps(path) err := getBinaryDeps(files, path)
if err != nil { if err != nil {
return files, err return err
} }
files = append(files, binaryDepFiles...) files[path] = false
files = append(files, path)
found = true found = true
break break
} }
} }
if !found { if !found {
return nil, fmt.Errorf("getBinaryDeps: unable to locate dependency for %q: %s", file, lib) log.Fatalf("Unable to locate dependency for %q: %s", file, lib)
} }
} }
return return nil
} }
func getFiles(list []string, required bool) (files []string, err error) { func getFiles(files misc.StringSet, newFiles misc.StringSet, required bool) error {
for _, file := range list { for file := range newFiles {
filelist, err := getFile(file, required) err := getFile(files, file, required)
if err != nil { if err != nil {
return nil, err return err
} }
files = append(files, filelist...)
} }
return nil
files = misc.RemoveDuplicates(files)
return
} }
func getFile(file string, required bool) (files []string, err error) { func getFile(files misc.StringSet, file string, required bool) error {
// Expand glob expression if !exists(file) {
expanded, err := filepath.Glob(file)
if err != nil {
return
}
if len(expanded) > 0 && expanded[0] != file {
for _, path := range expanded {
if globFiles, err := getFile(path, required); err != nil {
return files, err
} else {
files = append(files, globFiles...)
}
}
return misc.RemoveDuplicates(files), nil
}
fileInfo, err := os.Stat(file)
if err != nil {
if required { if required {
return files, errors.New("getFile: File does not exist :" + file) return errors.New("getFile: File does not exist :" + file)
} }
return files, nil return nil
} }
if fileInfo.IsDir() { files[file] = false
// Recurse over directory contents
err := filepath.Walk(file, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if f.IsDir() {
return nil
}
newFiles, err := getFile(path, required)
if err != nil {
return err
}
files = append(files, newFiles...)
return nil
})
if err != nil {
return files, err
}
} else {
files = append(files, file)
// get dependencies for binaries // get dependencies for binaries
if _, err := elf.Open(file); err == nil { if _, err := elf.Open(file); err != nil {
if binaryDepFiles, err := getBinaryDeps(file); err != nil { // file is not an elf, so don't resolve lib dependencies
return files, err return nil
} else {
files = append(files, binaryDepFiles...)
}
}
} }
files = misc.RemoveDuplicates(files) err := getBinaryDeps(files, file)
return if err != nil {
return err
}
return nil
} }
func getOskConfFontPath(oskConfPath string) (string, error) { func getOskConfFontPath(oskConfPath string) (string, error) {
@@ -338,199 +279,166 @@ func getOskConfFontPath(oskConfPath string) (string, error) {
// Get a list of files and their dependencies related to supporting rootfs full // Get a list of files and their dependencies related to supporting rootfs full
// disk (d)encryption // disk (d)encryption
func getFdeFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) { func getFdeFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
confFiles := []string{ confFiles := misc.StringSet{
"/etc/osk.conf", "/etc/osk.conf": false,
"/etc/ts.conf", "/etc/ts.conf": false,
"/etc/pointercal", "/etc/pointercal": false,
"/etc/fb.modes", "/etc/fb.modes": false,
"/etc/directfbrc", "/etc/directfbrc": false,
} }
// TODO: this shouldn't be false? though some files (pointercal) don't always exist... // TODO: this shouldn't be false? though some files (pointercal) don't always exist...
if files, err = getFiles(confFiles, false); err != nil { if err := getFiles(files, confFiles, false); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err) return err
} }
// osk-sdl // osk-sdl
oskFiles := []string{ oskFiles := misc.StringSet{
"/usr/bin/osk-sdl", "/usr/bin/osk-sdl": false,
"/sbin/cryptsetup", "/sbin/cryptsetup": false,
"/usr/lib/libGL.so.1", "/usr/lib/libGL.so.1": false}
} if err := getFiles(files, oskFiles, true); err != nil {
if filelist, err := getFiles(oskFiles, true); err != nil { return err
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
files = append(files, filelist...)
} }
fontFile, err := getOskConfFontPath("/etc/osk.conf") fontFile, err := getOskConfFontPath("/etc/osk.conf")
if err != nil { if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file %q: %w", fontFile, err) return err
} }
files = append(files, fontFile) files[fontFile] = false
// Directfb // Directfb
dfbFiles := []string{} dfbFiles := make(misc.StringSet)
err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error { err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
if filepath.Ext(path) == ".so" { if filepath.Ext(path) == ".so" {
dfbFiles = append(dfbFiles, path) dfbFiles[path] = false
} }
return nil return nil
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file %w", err) log.Print("getBinaryDeps: failed to stat file")
return err
} }
if filelist, err := getFiles(dfbFiles, true); err != nil { if err := getFiles(files, dfbFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err) return err
} else {
files = append(files, filelist...)
} }
// tslib // tslib
tslibFiles := []string{} tslibFiles := make(misc.StringSet)
err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error { err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
if filepath.Ext(path) == ".so" { if filepath.Ext(path) == ".so" {
tslibFiles = append(tslibFiles, path) tslibFiles[path] = false
} }
return nil return nil
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file: %w", err) log.Print("getBinaryDeps: failed to stat file")
return err
} }
libts, _ := filepath.Glob("/usr/lib/libts*") libts, _ := filepath.Glob("/usr/lib/libts*")
tslibFiles = append(tslibFiles, libts...) for _, file := range libts {
if filelist, err := getFiles(tslibFiles, true); err != nil { tslibFiles[file] = false
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err) }
} else { if err = getFiles(files, tslibFiles, true); err != nil {
files = append(files, filelist...) return err
} }
// mesa hw accel // mesa hw accel
if devinfo.MesaDriver != "" { if devinfo.MesaDriver != "" {
mesaFiles := []string{ mesaFiles := misc.StringSet{
"/usr/lib/libEGL.so.1", "/usr/lib/libEGL.so.1": false,
"/usr/lib/libGLESv2.so.2", "/usr/lib/libGLESv2.so.2": false,
"/usr/lib/libgbm.so.1", "/usr/lib/libgbm.so.1": false,
"/usr/lib/libudev.so.1", "/usr/lib/libudev.so.1": false,
"/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so", "/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so": false,
} }
if filelist, err := getFiles(mesaFiles, true); err != nil { if err := getFiles(files, mesaFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err) return err
} else {
files = append(files, filelist...)
} }
} }
return return nil
} }
func getHookScripts() (files []string) { func getHookScripts(files misc.StringSet) {
scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh") scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh")
files = append(files, scripts...) for _, script := range scripts {
files[script] = false
return }
} }
func getInitfsExtraFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) { func getInitfsExtraFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
log.Println("== Generating initramfs extra ==") log.Println("== Generating initramfs extra ==")
binariesExtra := []string{ binariesExtra := misc.StringSet{
"/lib/libz.so.1", "/lib/libz.so.1": false,
"/sbin/btrfs", "/sbin/dmsetup": false,
"/sbin/dmsetup", "/sbin/e2fsck": false,
"/sbin/e2fsck", "/usr/sbin/parted": false,
"/usr/sbin/parted", "/usr/sbin/resize2fs": false,
"/usr/sbin/resize2fs", "/usr/sbin/resize.f2fs": false,
"/usr/sbin/resize.f2fs",
} }
log.Println("- Including extra binaries") log.Println("- Including extra binaries")
if filelist, err := getFiles(binariesExtra, true); err != nil { if err := getFiles(files, binariesExtra, true); err != nil {
return nil, err return err
} else {
files = append(files, filelist...)
}
// Hook files & scripts
if exists("/etc/postmarketos-mkinitfs/files-extra") {
log.Println("- Including hook files")
var hookFiles []string
hookFiles, err := getHookFiles("/etc/postmarketos-mkinitfs/files-extra")
if err != nil {
return nil, err
}
if filelist, err := getFiles(hookFiles, true); err != nil {
return nil, err
} else {
files = append(files, filelist...)
}
} }
if exists("/usr/bin/osk-sdl") { if exists("/usr/bin/osk-sdl") {
log.Println("- Including FDE support") log.Println("- Including FDE support")
if fdeFiles, err := getFdeFiles(devinfo); err != nil { if err := getFdeFiles(files, devinfo); err != nil {
return nil, err return err
} else {
files = append(files, fdeFiles...)
} }
} else { } else {
log.Println("- *NOT* including FDE support") log.Println("- *NOT* including FDE support")
} }
return return nil
} }
func getInitfsFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) { func getInitfsFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
log.Println("== Generating initramfs ==") log.Println("== Generating initramfs ==")
requiredFiles := []string{ requiredFiles := misc.StringSet{
"/bin/busybox", "/bin/busybox": false,
"/bin/sh", "/bin/sh": false,
"/bin/busybox-extras", "/bin/busybox-extras": false,
"/usr/sbin/telnetd", "/usr/sbin/telnetd": false,
"/sbin/kpartx", "/sbin/kpartx": false,
"/etc/deviceinfo", "/etc/deviceinfo": false,
"/usr/bin/unudhcpd",
} }
// Hook files & scripts // Hook files & scripts
if exists("/etc/postmarketos-mkinitfs/files") { if exists("/etc/postmarketos-mkinitfs/files") {
log.Println("- Including hook files") log.Println("- Including hook files")
if hookFiles, err := getHookFiles("/etc/postmarketos-mkinitfs/files"); err != nil { hookFiles := getHookFiles("/etc/postmarketos-mkinitfs/files")
return nil, err if err := getFiles(files, hookFiles, true); err != nil {
} else { return err
if filelist, err := getFiles(hookFiles, true); err != nil {
return nil, err
} else {
files = append(files, filelist...)
}
} }
} }
log.Println("- Including hook scripts") log.Println("- Including hook scripts")
hookScripts := getHookScripts() getHookScripts(files)
files = append(files, hookScripts...)
log.Println("- Including required binaries") log.Println("- Including required binaries")
if filelist, err := getFiles(requiredFiles, true); err != nil { if err := getFiles(files, requiredFiles, true); err != nil {
return nil, err return err
} else {
files = append(files, filelist...)
} }
return return nil
} }
func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []string, err error) { func getInitfsModules(files misc.StringSet, devinfo deviceinfo.DeviceInfo, kernelVer string) error {
log.Println("- Including kernel modules") log.Println("- Including kernel modules")
modDir := filepath.Join("/lib/modules", kernelVer) modDir := filepath.Join("/lib/modules", kernelVer)
if !exists(modDir) { if !exists(modDir) {
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message // dir /lib/modules/<kernel> if kernel built without module support, so just print a message
log.Printf("-- kernel module directory not found: %q, not including modules", modDir) log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
return return nil
} }
// modules.* required by modprobe // modules.* required by modprobe
modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*")) modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
files = append(files, modprobeFiles...) for _, file := range modprobeFiles {
files[file] = false
}
// module name (without extension), or directory (trailing slash is important! globs OK) // module name (without extension), or directory (trailing slash is important! globs OK)
requiredModules := []string{ requiredModules := []string{
@@ -548,18 +456,16 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []
dir = filepath.Join(modDir, dir) dir = filepath.Join(modDir, dir)
dirs, _ := filepath.Glob(dir) dirs, _ := filepath.Glob(dir)
for _, d := range dirs { for _, d := range dirs {
if filelist, err := getModulesInDir(d); err != nil { if err := getModulesInDir(files, d); err != nil {
return nil, fmt.Errorf("getInitfsModules: unable to get modules dir %q: %w", d, err) log.Print("Unable to get modules in dir: ", d)
} else { return err
files = append(files, filelist...)
} }
} }
} else if dir == "" { } else if dir == "" {
// item is a module name // item is a module name
if filelist, err := getModule(file, modDir); err != nil { if err := getModule(files, file, modDir); err != nil {
return nil, fmt.Errorf("getInitfsModules: unable to get module %q: %w", file, err) log.Print("Unable to get module: ", file)
} else { return err
files = append(files, filelist...)
} }
} else { } else {
log.Printf("Unknown module entry: %q", item) log.Printf("Unknown module entry: %q", item)
@@ -568,10 +474,9 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []
// deviceinfo modules // deviceinfo modules
for _, module := range strings.Fields(devinfo.ModulesInitfs) { for _, module := range strings.Fields(devinfo.ModulesInitfs) {
if filelist, err := getModule(module, modDir); err != nil { if err := getModule(files, module, modDir); err != nil {
return nil, fmt.Errorf("getInitfsModules: unable to get modules from deviceinfo: %w", err) log.Print("Unable to get modules from deviceinfo")
} else { return err
files = append(files, filelist...)
} }
} }
@@ -580,20 +485,20 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []
for _, modFile := range initfsModFiles { for _, modFile := range initfsModFiles {
f, err := os.Open(modFile) f, err := os.Open(modFile)
if err != nil { if err != nil {
return nil, fmt.Errorf("getInitfsModules: unable to open mkinitfs modules file %q: %w", modFile, err) log.Print("getInitfsModules: unable to open mkinitfs modules file: ", modFile)
return err
} }
defer f.Close() defer f.Close()
s := bufio.NewScanner(f) s := bufio.NewScanner(f)
for s.Scan() { for s.Scan() {
if filelist, err := getModule(s.Text(), modDir); err != nil { if err := getModule(files, s.Text(), modDir); err != nil {
return nil, fmt.Errorf("getInitfsModules: unable to get module file %q: %w", s.Text(), err) log.Print("getInitfsModules: unable to get module file: ", s.Text())
} else { return err
files = append(files, filelist...)
} }
} }
} }
return return nil
} }
func getKernelReleaseFile() (string, error) { func getKernelReleaseFile() (string, error) {
@@ -622,50 +527,6 @@ func getKernelVersion() (string, error) {
return strings.TrimSpace(string(contents)), nil return strings.TrimSpace(string(contents)), nil
} }
func Copy(srcFile, dstFile string) error {
out, err := os.Create(dstFile)
if err != nil {
return err
}
defer out.Close()
in, err := os.Open(srcFile)
if err != nil {
return err
}
defer in.Close()
_, err = io.Copy(out, in)
if err != nil {
return err
}
return nil
}
func copyUbootFiles(path string, devinfo deviceinfo.DeviceInfo) error {
if devinfo.UbootBoardname == "" {
return nil
}
srcDir := filepath.Join("/usr/share/u-boot", devinfo.UbootBoardname)
entries, err := os.ReadDir(srcDir)
if err != nil {
return err
}
for _, entry := range entries {
sourcePath := filepath.Join(srcDir, entry.Name())
destPath := filepath.Join(path, entry.Name())
if err := Copy(sourcePath, destPath); err != nil {
return err
}
}
return nil
}
func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error { func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
initfsArchive, err := archive.New() initfsArchive, err := archive.New()
if err != nil { if err != nil {
@@ -677,40 +538,18 @@ func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo
"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc", "/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
} }
for _, dir := range requiredDirs { for _, dir := range requiredDirs {
if err := initfsArchive.AddItem(dir, dir); err != nil { initfsArchive.Dirs[dir] = false
return err
}
} }
if files, err := getInitfsFiles(devinfo); err != nil { if err := getInitfsFiles(initfsArchive.Files, devinfo); err != nil {
return err return err
} else {
items := make(map[string]string)
// copy files into a map, where the source(key) and dest(value) are the
// same
for _, f := range files {
items[f] = f
}
if err := initfsArchive.AddItems(items); err != nil {
return err
}
} }
if files, err := getInitfsModules(devinfo, kernVer); err != nil { if err := getInitfsModules(initfsArchive.Files, devinfo, kernVer); err != nil {
return err return err
} else {
items := make(map[string]string)
// copy files into a map, where the source(key) and dest(value) are the
// same
for _, f := range files {
items[f] = f
}
if err := initfsArchive.AddItems(items); err != nil {
return err
}
} }
if err := initfsArchive.AddItem("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil { if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
return err return err
} }
@@ -719,13 +558,13 @@ func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo
splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz") splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz")
for _, file := range splashFiles { for _, file := range splashFiles {
// splash images are expected at /<file> // splash images are expected at /<file>
if err := initfsArchive.AddItem(file, filepath.Join("/", filepath.Base(file))); err != nil { if err := initfsArchive.AddFile(file, filepath.Join("/", filepath.Base(file))); err != nil {
return err return err
} }
} }
// initfs_functions // initfs_functions
if err := initfsArchive.AddItem("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil { if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
return err return err
} }
@@ -743,19 +582,8 @@ func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo
return err return err
} }
if files, err := getInitfsExtraFiles(devinfo); err != nil { if err := getInitfsExtraFiles(initfsExtraArchive.Files, devinfo); err != nil {
return err return err
} else {
items := make(map[string]string)
// copy files into a map, where the source(key) and dest(value) are the
// same
for _, f := range files {
items[f] = f
}
if err := initfsExtraArchive.AddItems(items); err != nil {
return err
}
} }
log.Println("- Writing and verifying initramfs-extra archive") log.Println("- Writing and verifying initramfs-extra archive")
@@ -770,20 +598,20 @@ func stripExts(file string) string {
return strings.Split(file, ".")[0] return strings.Split(file, ".")[0]
} }
func getModulesInDir(modPath string) (files []string, err error) { func getModulesInDir(files misc.StringSet, modPath string) error {
err = filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error { err := filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
// TODO: need to support more extensions? // TODO: need to support more extensions?
if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" { if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
return nil return nil
} }
files = append(files, path) files[path] = false
return nil return nil
}) })
if err != nil { if err != nil {
return nil, err return err
} }
return return nil
} }
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module // Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
@@ -792,33 +620,35 @@ func getModulesInDir(modPath string) (files []string, err error) {
// have been built into the kernel // have been built into the kernel
// TODO: look for it in modules.builtin, and make it fatal if it can't be found // TODO: look for it in modules.builtin, and make it fatal if it can't be found
// anywhere // anywhere
func getModule(modName string, modDir string) (files []string, err error) { func getModule(files misc.StringSet, modName string, modDir string) error {
modDep := filepath.Join(modDir, "modules.dep") modDep := filepath.Join(modDir, "modules.dep")
if !exists(modDep) { if !exists(modDep) {
return nil, fmt.Errorf("kernel module.dep not found: %s", modDir) log.Fatal("Kernel module.dep not found: ", modDir)
} }
fd, err := os.Open(modDep) fd, err := os.Open(modDep)
if err != nil { if err != nil {
return nil, fmt.Errorf("unable to open modules.dep: %w", err) log.Print("Unable to open modules.dep: ", modDep)
return err
} }
defer fd.Close() defer fd.Close()
deps, err := getModuleDeps(modName, fd) deps, err := getModuleDeps(modName, fd)
if err != nil { if err != nil {
return nil, err return err
} }
for _, dep := range deps { for _, dep := range deps {
p := filepath.Join(modDir, dep) p := filepath.Join(modDir, dep)
if !exists(p) { if !exists(p) {
return nil, fmt.Errorf("tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p) log.Print(fmt.Sprintf("Tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p))
return err
} }
files = append(files, p) files[p] = false
} }
return return err
} }
// Get the canonicalized name for the module as represented in the given modules.dep io.reader // Get the canonicalized name for the module as represented in the given modules.dep io.reader

View File

@@ -6,23 +6,19 @@ package archive
import ( import (
"bytes" "bytes"
"compress/flate" "compress/flate"
"fmt" "github.com/cavaliercoder/go-cpio"
"github.com/klauspost/pgzip"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
"io" "io"
"log" "log"
"os" "os"
"path/filepath" "path/filepath"
"sort"
"strings" "strings"
"sync"
"syscall"
"github.com/cavaliercoder/go-cpio"
"github.com/klauspost/pgzip"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
) )
type Archive struct { type Archive struct {
items archiveItems Dirs misc.StringSet
Files misc.StringSet
cpioWriter *cpio.Writer cpioWriter *cpio.Writer
buf *bytes.Buffer buf *bytes.Buffer
} }
@@ -31,162 +27,83 @@ func New() (*Archive, error) {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
archive := &Archive{ archive := &Archive{
cpioWriter: cpio.NewWriter(buf), cpioWriter: cpio.NewWriter(buf),
Files: make(misc.StringSet),
Dirs: make(misc.StringSet),
buf: buf, buf: buf,
} }
return archive, nil return archive, nil
} }
type archiveItem struct {
sourcePath string
header *cpio.Header
}
type archiveItems struct {
items []archiveItem
sync.RWMutex
}
// Adds the given item to the archiveItems, only if it doesn't already exist in
// the list. The items are kept sorted in ascending order.
func (a *archiveItems) Add(item archiveItem) {
a.Lock()
defer a.Unlock()
if len(a.items) < 1 {
// empty list
a.items = append(a.items, item)
return
}
// find existing item, or index of where new item should go
i := sort.Search(len(a.items), func(i int) bool {
return strings.Compare(item.header.Name, a.items[i].header.Name) <= 0
})
if i >= len(a.items) {
// doesn't exist in list, but would be at the very end
a.items = append(a.items, item)
return
}
if strings.Compare(a.items[i].header.Name, item.header.Name) == 0 {
// already in list
return
}
// grow list by 1, shift right at index, and insert new string at index
a.items = append(a.items, archiveItem{})
copy(a.items[i+1:], a.items[i:])
a.items[i] = item
}
// iterate through items and send each one over the returned channel
func (a *archiveItems) IterItems() <-chan archiveItem {
ch := make(chan archiveItem)
go func() {
a.RLock()
defer a.RUnlock()
for _, item := range a.items {
ch <- item
}
close(ch)
}()
return ch
}
func (archive *Archive) Write(path string, mode os.FileMode) error { func (archive *Archive) Write(path string, mode os.FileMode) error {
if err := archive.writeCpio(); err != nil { if err := archive.writeCpio(); err != nil {
return err return err
} }
if err := archive.cpioWriter.Close(); err != nil { if err := archive.cpioWriter.Close(); err != nil {
return fmt.Errorf("archive.Write: error closing archive: %w", err) return err
} }
// Write archive to path // Write archive to path
if err := archive.writeCompressed(path, mode); err != nil { if err := archive.writeCompressed(path, mode); err != nil {
return fmt.Errorf("unable to write archive to location %q: %w", path, err) log.Print("Unable to write archive to location: ", path)
return err
} }
if err := os.Chmod(path, mode); err != nil { if err := os.Chmod(path, mode); err != nil {
return fmt.Errorf("unable to chmod %q to %s: %w", path, mode, err) return err
} }
return nil return nil
} }
// Adds the given items in the map to the archive. The map format is {source path:dest path}. func (archive *Archive) AddFile(file string, dest string) error {
// Internally this just calls AddItem on each key,value pair in the map.
func (archive *Archive) AddItems(paths map[string]string) error {
for s, d := range paths {
if err := archive.AddItem(s, d); err != nil {
return err
}
}
return nil
}
// Adds the given file or directory at "source" to the archive at "dest"
func (archive *Archive) AddItem(source string, dest string) error {
sourceStat, err := os.Lstat(source)
if err != nil {
e, ok := err.(*os.PathError)
if e.Err == syscall.ENOENT && ok {
// doesn't exist in current filesystem, assume it's a new directory
return archive.addDir(dest)
}
return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
}
if sourceStat.Mode()&os.ModeDir != 0 {
return archive.addDir(dest)
}
return archive.addFile(source, dest)
}
func (archive *Archive) addFile(source string, dest string) error {
if err := archive.addDir(filepath.Dir(dest)); err != nil { if err := archive.addDir(filepath.Dir(dest)); err != nil {
return err return err
} }
sourceStat, err := os.Lstat(source) if archive.Files[file] {
// Already written to cpio
return nil
}
fileStat, err := os.Lstat(file)
if err != nil { if err != nil {
log.Print("addFile: failed to stat file: ", source) log.Print("AddFile: failed to stat file: ", file)
return err return err
} }
// Symlink: write symlink to archive then set 'file' to link target // Symlink: write symlink to archive then set 'file' to link target
if sourceStat.Mode()&os.ModeSymlink != 0 { if fileStat.Mode()&os.ModeSymlink != 0 {
// log.Printf("File %q is a symlink", file) // log.Printf("File %q is a symlink", file)
target, err := os.Readlink(source) target, err := os.Readlink(file)
if err != nil { if err != nil {
log.Print("addFile: failed to get symlink target: ", source) log.Print("AddFile: failed to get symlink target: ", file)
return err return err
} }
destFilename := strings.TrimPrefix(dest, "/") destFilename := strings.TrimPrefix(dest, "/")
hdr := &cpio.Header{
Name: destFilename,
Linkname: target,
Mode: 0644 | cpio.ModeSymlink,
Size: int64(len(target)),
// Checksum: 1,
}
if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
return err
}
if _, err = archive.cpioWriter.Write([]byte(target)); err != nil {
return err
}
archive.items.Add(archiveItem{ archive.Files[file] = true
sourcePath: source,
header: &cpio.Header{
Name: destFilename,
Linkname: target,
Mode: 0644 | cpio.ModeSymlink,
Size: int64(len(target)),
// Checksum: 1,
},
})
if filepath.Dir(target) == "." { if filepath.Dir(target) == "." {
target = filepath.Join(filepath.Dir(source), target) target = filepath.Join(filepath.Dir(file), target)
} }
// make sure target is an absolute path // make sure target is an absolute path
if !filepath.IsAbs(target) { if !filepath.IsAbs(target) {
target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(source)) target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
if err != nil { if err != nil {
return err return err
} }
@@ -194,21 +111,34 @@ func (archive *Archive) addFile(source string, dest string) error {
// TODO: add verbose mode, print stuff like this: // TODO: add verbose mode, print stuff like this:
// log.Printf("symlink: %q, target: %q", file, target) // log.Printf("symlink: %q, target: %q", file, target)
// write symlink target // write symlink target
err = archive.addFile(target, target) err = archive.AddFile(target, target)
return err return err
} }
destFilename := strings.TrimPrefix(dest, "/") // log.Printf("writing file: %q", file)
archive.items.Add(archiveItem{ fd, err := os.Open(file)
sourcePath: source, if err != nil {
header: &cpio.Header{ return err
Name: destFilename, }
Mode: cpio.FileMode(sourceStat.Mode().Perm()), defer fd.Close()
Size: sourceStat.Size(),
// Checksum: 1, destFilename := strings.TrimPrefix(dest, "/")
}, hdr := &cpio.Header{
}) Name: destFilename,
Mode: cpio.FileMode(fileStat.Mode().Perm()),
Size: fileStat.Size(),
// Checksum: 1,
}
if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
return err
}
if _, err = io.Copy(archive.cpioWriter, fd); err != nil {
return err
}
archive.Files[file] = true
return nil return nil
} }
@@ -246,48 +176,29 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) error {
} }
func (archive *Archive) writeCpio() error { func (archive *Archive) writeCpio() error {
// having a transient function for actually adding files to the archive // Write any dirs added explicitly
// allows the deferred fd.close to run after every copy and prevent having for dir := range archive.Dirs {
// tons of open file handles until the copying is all done archive.addDir(dir)
copyToArchive := func(source string, header *cpio.Header) error {
if err := archive.cpioWriter.WriteHeader(header); err != nil {
return fmt.Errorf("archive.writeCpio: unable to write header: %w", err)
}
// don't copy actual dirs into the archive, writing the header is enough
if !header.Mode.IsDir() {
if header.Mode.IsRegular() {
fd, err := os.Open(source)
if err != nil {
return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
}
defer fd.Close()
if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
}
} else if header.Linkname != "" {
// the contents of a symlink is just need the link name
if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
}
} else {
return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
}
}
return nil
} }
for i := range archive.items.IterItems() { // Write files and any missing parent dirs
if err := copyToArchive(i.sourcePath, i.header); err != nil { for file, imported := range archive.Files {
if imported {
continue
}
if err := archive.AddFile(file, file); err != nil {
return err return err
} }
} }
return nil return nil
} }
func (archive *Archive) addDir(dir string) error { func (archive *Archive) addDir(dir string) error {
if archive.Dirs[dir] {
// Already imported
return nil
}
if dir == "/" { if dir == "/" {
dir = "." dir = "."
} }
@@ -295,13 +206,19 @@ func (archive *Archive) addDir(dir string) error {
subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/") subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
for i, subdir := range subdirs { for i, subdir := range subdirs {
path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir) path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
archive.items.Add(archiveItem{ if archive.Dirs[path] {
sourcePath: path, // Subdir already imported
header: &cpio.Header{ continue
Name: path, }
Mode: cpio.ModeDir | 0755, err := archive.cpioWriter.WriteHeader(&cpio.Header{
}, Name: path,
Mode: cpio.ModeDir | 0755,
}) })
if err != nil {
return err
}
archive.Dirs[path] = true
// log.Print("wrote dir: ", path)
} }
return nil return nil

View File

@@ -1,189 +0,0 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package archive
import (
"reflect"
"testing"
"github.com/cavaliercoder/go-cpio"
)
func TestArchiveItemsAdd(t *testing.T) {
subtests := []struct {
name string
inItems []archiveItem
inItem archiveItem
expected []archiveItem
}{
{
name: "empty list",
inItems: []archiveItem{},
inItem: archiveItem{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
expected: []archiveItem{
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
},
{
name: "already exists",
inItems: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
inItem: archiveItem{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
expected: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
},
{
name: "add new",
inItems: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
{
sourcePath: "/foo/bar1",
header: &cpio.Header{Name: "/foo/bar1"},
},
},
inItem: archiveItem{
sourcePath: "/foo/bar0",
header: &cpio.Header{Name: "/foo/bar0"},
},
expected: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
{
sourcePath: "/foo/bar0",
header: &cpio.Header{Name: "/foo/bar0"},
},
{
sourcePath: "/foo/bar1",
header: &cpio.Header{Name: "/foo/bar1"},
},
},
},
{
name: "add new at beginning",
inItems: []archiveItem{
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
inItem: archiveItem{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
expected: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
},
{
name: "add new at end",
inItems: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
},
inItem: archiveItem{
sourcePath: "/zzz/bazz",
header: &cpio.Header{Name: "/zzz/bazz"},
},
expected: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/zzz/bazz",
header: &cpio.Header{Name: "/zzz/bazz"},
},
},
},
}
for _, st := range subtests {
t.Run(st.name, func(t *testing.T) {
a := archiveItems{items: st.inItems}
a.Add(st.inItem)
if !reflect.DeepEqual(st.expected, a.items) {
t.Fatal("expected:", st.expected, " got: ", a.items)
}
})
}
}

View File

@@ -16,7 +16,6 @@ import (
type DeviceInfo struct { type DeviceInfo struct {
AppendDtb string AppendDtb string
Arch string Arch string
UbootBoardname string
BootimgAppendSEAndroidEnforce string BootimgAppendSEAndroidEnforce string
BootimgBlobpack string BootimgBlobpack string
BootimgDtbSecond string BootimgDtbSecond string
@@ -120,10 +119,7 @@ func nameToField(name string) string {
if p == "deviceinfo" { if p == "deviceinfo" {
continue continue
} }
if len(p) < 1 { field = field + strings.Title(p)
continue
}
field = field + strings.ToUpper(p[:1]) + p[1:]
} }
return field return field

View File

@@ -1,4 +1,4 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net> // Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later // SPDX-License-Identifier: GPL-3.0-or-later
package misc package misc
@@ -10,6 +10,8 @@ import (
"path/filepath" "path/filepath"
) )
type StringSet map[string]bool
// Converts a relative symlink target path (e.g. ../../lib/foo.so), that is // Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
// absolute path // absolute path
func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) { func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
@@ -46,31 +48,3 @@ func FreeSpace(path string) (uint64, error) {
size := stat.Bavail * uint64(stat.Bsize) size := stat.Bavail * uint64(stat.Bsize)
return size, nil return size, nil
} }
// Merge the contents of "b" into "a", overwriting any previously existing keys
// in "a"
func Merge(a map[string]string, b map[string]string) {
for k, v := range b {
a[k] = v
}
}
// Removes duplicate entries from the given string slice and returns a slice
// with the unique values
func RemoveDuplicates(in []string) (out []string) {
// use a map to "remove" duplicates. the value in the map is totally
// irrelevant
outMap := make(map[string]bool)
for _, s := range in {
if ok := outMap[s]; !ok {
outMap[s] = true
}
}
out = make([]string, 0, len(outMap))
for k := range outMap {
out = append(out, k)
}
return
}

View File

@@ -1,125 +0,0 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package misc
import (
"reflect"
"sort"
"testing"
)
func TestMerge(t *testing.T) {
subtests := []struct {
name string
inA map[string]string
inB map[string]string
expected map[string]string
}{
{
name: "empty B",
inA: map[string]string{
"foo": "bar",
"banana": "airplane",
},
inB: map[string]string{},
expected: map[string]string{
"foo": "bar",
"banana": "airplane",
},
},
{
name: "empty A",
inA: map[string]string{},
inB: map[string]string{
"foo": "bar",
"banana": "airplane",
},
expected: map[string]string{
"foo": "bar",
"banana": "airplane",
},
},
{
name: "both populated, some duplicates",
inA: map[string]string{
"bar": "bazz",
"banana": "yellow",
"guava": "green",
},
inB: map[string]string{
"foo": "bar",
"banana": "airplane",
},
expected: map[string]string{
"foo": "bar",
"guava": "green",
"banana": "airplane",
"bar": "bazz",
},
},
}
for _, st := range subtests {
t.Run(st.name, func(t *testing.T) {
out := st.inA
Merge(out, st.inB)
if !reflect.DeepEqual(st.expected, out) {
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
}
})
}
}
func TestRemoveDuplicates(t *testing.T) {
subtests := []struct {
name string
in []string
expected []string
}{
{
name: "no duplicates",
in: []string{
"foo",
"bar",
"banana",
"airplane",
},
expected: []string{
"foo",
"bar",
"banana",
"airplane",
},
},
{
name: "all duplicates",
in: []string{
"foo",
"foo",
"foo",
"foo",
},
expected: []string{
"foo",
},
},
{
name: "empty",
in: []string{},
expected: []string{},
},
}
for _, st := range subtests {
t.Run(st.name, func(t *testing.T) {
// note: sorting to make comparison easier later
sort.Strings(st.expected)
out := RemoveDuplicates(st.in)
sort.Strings(out)
if !reflect.DeepEqual(st.expected, out) {
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
}
})
}
}