Compare commits
2 Commits

| Author | SHA1 | Date |
|---|---|---|
| | d62180f9d6 | |
| | b7f02ff970 | |
@@ -3,9 +3,6 @@
# global settings
image: alpine:edge

variables:
  GOFLAGS: "-buildvcs=false"

stages:
  - lint
  - build
@@ -24,13 +21,25 @@ stages:
    - merge_requests
    - tags

# device documentation
gofmt linting:
  stage: lint
  allow_failure: true
  <<: *only-default
  before_script:
    # specific mirror used because staticcheck hasn't made it to the other mirrors yet...
    - apk -q update --repository http://dl-4.alpinelinux.org/alpine/edge/testing
    - apk -q add --repository http://dl-4.alpinelinux.org/alpine/edge/testing go staticcheck
  script:
    - .gitlab-ci/check_linting.sh

build:
  stage: build
  <<: *only-default
  before_script:
    - apk -q add go staticcheck make
    - apk -q add go
  script:
    - make test
    - make
    - go build -v
    - go test ./...
  artifacts:
    expire_in: 1 week
13 .gitlab-ci/check_linting.sh Executable file
@@ -0,0 +1,13 @@
#!/bin/sh

echo "### Running gofmt..."
files="$(gofmt -l .)"

if [ ! -z "$files" ]; then
    # run gofmt to print out the diff of what needs to be changed
    gofmt -d -e .
    exit 1
fi

echo "### Running staticcheck..."
staticcheck ./...
54 Makefile
@@ -1,54 +0,0 @@
.POSIX:
.SUFFIXES:

PREFIX?=/usr/local
BINDIR?=$(PREFIX)/sbin
SHAREDIR?=$(PREFIX)/share
GO?=go
GOFLAGS?=
LDFLAGS+=-s -w
RM?=rm -f
GOTEST=go test -count=1 -race

GOSRC!=find * -name '*.go'
GOSRC+=go.mod go.sum

all: postmarketos-mkinitfs

postmarketos-mkinitfs: $(GOSRC)
	$(GO) build $(GOFLAGS) -ldflags "$(LDFLAGS)" -o postmarketos-mkinitfs

.PHONY: fmt
fmt:
	gofmt -w .

test:
	@if [ `gofmt -l . | wc -l` -ne 0 ]; then \
		gofmt -d .; \
		echo "ERROR: source files need reformatting with gofmt"; \
		exit 1; \
	fi
	@staticcheck ./...

	@$(GOTEST) ./...

clean:
	$(RM) postmarketos-mkinitfs

install: $(DOCS) postmarketos-mkinitfs
	install -Dm755 postmarketos-mkinitfs -t $(DESTDIR)$(BINDIR)/
	ln -sf postmarketos-mkinitfs $(DESTDIR)$(BINDIR)/mkinitfs

.PHONY: checkinstall
checkinstall:
	test -e $(DESTDIR)$(BINDIR)/postmarketos-mkinitfs
	test -L $(DESTDIR)$(BINDIR)/mkinitfs

RMDIR_IF_EMPTY:=sh -c '! [ -d $$0 ] || ls -1qA $$0 | grep -q . || rmdir $$0'

uninstall:
	$(RM) $(DESTDIR)$(BINDIR)/postmarketos-mkinitfs
	$(RM) $(DESTDIR)$(BINDIR)/mkinitfs
	${RMDIR_IF_EMPTY} $(DESTDIR)$(BINDIR)

.PHONY: all clean install uninstall test
45 README.md
@@ -1,45 +0,0 @@
`postmarketos-mkinitfs` is a tool for generating an initramfs (and installing
it) on postmarketOS.

## Building

Building this project requires a Go compiler/toolchain and `make`:

```
$ make
```

To install locally:

```
$ make install
```

Installation prefix can be set in the generally accepted way with setting
`PREFIX`:

```
$ make PREFIX=/some/location
# make PREFIX=/some/location install
```

Other paths can be modified from the command line as well, see the top section of
the `Makefile` for more information.

Tests (functional and linting) can be executed by using the `test` make target:

```
$ make test
```

## Usage

The application uses configuration from `/etc/deviceinfo`, and does not support
any other options at runtime. It can be run simply by executing:

```
$ postmarketos-mkinitfs
```

For historical reasons, a symlink from `mkinitfs` to `postmarketos-mkinitfs` is
also installed by the makefile's `install` target.
504 main.go
@@ -1,4 +1,4 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later

package main
@@ -10,6 +10,7 @@ import (
    "flag"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "os"
    "os/exec"
@@ -53,7 +54,7 @@ func main() {
    }

    // temporary working dir
    workDir, err := os.MkdirTemp("", "mkinitfs")
    workDir, err := ioutil.TempDir("", "mkinitfs")
    if err != nil {
        log.Fatal("Unable to create temporary work directory:", err)
    }
@@ -70,14 +71,6 @@ func main() {
        log.Fatal("generateInitfsExtra: ", err)
    }

    if err := copyUbootFiles(workDir, devinfo); errors.Is(err, os.ErrNotExist) {
        log.Println("u-boot files copying skipped: ", err)
    } else {
        if err != nil {
            log.Fatal("copyUbootFiles: ", err)
        }
    }

    // Final processing of initramfs / kernel is done by boot-deploy
    if err := bootDeploy(workDir, *outDir); err != nil {
        log.Fatal("bootDeploy: ", err)
@@ -93,29 +86,18 @@ func bootDeploy(workDir string, outDir string) error {
    if len(kernels) == 0 {
        return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
    }

    // Pick a kernel that does not have suffixes added by boot-deploy
    var kernFile string
    for _, f := range kernels {
        if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
            continue
        }
        kernFile = f
        break
    }

    kernFd, err := os.Open(kernFile)
    kernFile, err := os.Open(kernels[0])
    if err != nil {
        return err
    }
    defer kernFd.Close()
    defer kernFile.Close()

    kernFileCopy, err := os.Create(filepath.Join(workDir, "vmlinuz"))
    if err != nil {
        return err
    }

    if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
    if _, err = io.Copy(kernFileCopy, kernFile); err != nil {
        return err
    }
    kernFileCopy.Close()
@@ -148,72 +130,72 @@ func exists(file string) bool {
    return false
}

func getHookFiles(filesdir string) (files []string, err error) {
    fileInfo, err := os.ReadDir(filesdir)
func getHookFiles(filesdir string) misc.StringSet {
    fileInfo, err := ioutil.ReadDir(filesdir)
    if err != nil {
        return nil, err
        log.Fatal(err)
    }
    files := make(misc.StringSet)
    for _, file := range fileInfo {
        path := filepath.Join(filesdir, file.Name())
        f, err := os.Open(path)
        if err != nil {
            return nil, err
            log.Fatal(err)
        }
        defer f.Close()
        s := bufio.NewScanner(f)
        for s.Scan() {
            if filelist, err := getFiles([]string{s.Text()}, true); err != nil {
                return nil, fmt.Errorf("unable to find file %q required by %q", s.Text(), path)
            } else {
                files = append(files, filelist...)
            if !exists(s.Text()) {
                log.Fatalf("Unable to find file %q required by %q", s.Text(), path)
            }
            files[s.Text()] = false
        }
        if err := s.Err(); err != nil {
            return nil, err
            log.Fatal(err)
        }
    }
    return files, nil
    return files
}

// Recursively list all dependencies for a given ELF binary
func getBinaryDeps(file string) (files []string, err error) {
func getBinaryDeps(files misc.StringSet, file string) error {
    // if file is a symlink, resolve dependencies for target
    fileStat, err := os.Lstat(file)
    if err != nil {
        return nil, fmt.Errorf("getBinaryDeps: failed to stat file %q: %w", file, err)
        log.Print("getBinaryDeps: failed to stat file")
        return err
    }

    // Symlink: write symlink to archive then set 'file' to link target
    if fileStat.Mode()&os.ModeSymlink != 0 {
        target, err := os.Readlink(file)
        if err != nil {
            return nil, fmt.Errorf("getBinaryDeps: unable to read symlink %q: %w", file, err)
            log.Print("getBinaryDeps: unable to read symlink: ", file)
            return err
        }
        if !filepath.IsAbs(target) {
            target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
            if err != nil {
                return files, err
                return err
            }
        }
        binaryDepFiles, err := getBinaryDeps(target)
        if err != nil {
            return files, err
        if err := getBinaryDeps(files, target); err != nil {
            return err
        }
        files = append(files, binaryDepFiles...)
        return files, err
        return err
    }

    // get dependencies for binaries
    fd, err := elf.Open(file)
    if err != nil {
        return nil, fmt.Errorf("getBinaryDeps: unable to open elf binary %q: %w", file, err)
        log.Fatal(err)
    }
    libs, _ := fd.ImportedLibraries()
    fd.Close()
    files = append(files, file)
    files[file] = false

    if len(libs) == 0 {
        return files, err
        return err
    }

    libdirs := []string{"/usr/lib", "/lib"}
@@ -222,96 +204,55 @@ func getBinaryDeps(file string) (files []string, err error) {
        for _, libdir := range libdirs {
            path := filepath.Join(libdir, lib)
            if _, err := os.Stat(path); err == nil {
                binaryDepFiles, err := getBinaryDeps(path)
                err := getBinaryDeps(files, path)
                if err != nil {
                    return files, err
                    return err
                }
                files = append(files, binaryDepFiles...)
                files = append(files, path)
                files[path] = false
                found = true
                break
            }
        }
        if !found {
            return nil, fmt.Errorf("getBinaryDeps: unable to locate dependency for %q: %s", file, lib)
            log.Fatalf("Unable to locate dependency for %q: %s", file, lib)
        }
    }

    return
    return nil
}

func getFiles(list []string, required bool) (files []string, err error) {
    for _, file := range list {
        filelist, err := getFile(file, required)
func getFiles(files misc.StringSet, newFiles misc.StringSet, required bool) error {
    for file := range newFiles {
        err := getFile(files, file, required)
        if err != nil {
            return nil, err
            return err
        }
        files = append(files, filelist...)
    }
    return nil
}

    files = misc.RemoveDuplicates(files)
    return
}

func getFile(file string, required bool) (files []string, err error) {
    // Expand glob expression
    expanded, err := filepath.Glob(file)
    if err != nil {
        return
    }
    if len(expanded) > 0 && expanded[0] != file {
        for _, path := range expanded {
            if globFiles, err := getFile(path, required); err != nil {
                return files, err
            } else {
                files = append(files, globFiles...)
            }
        }
        return misc.RemoveDuplicates(files), nil
    }

    fileInfo, err := os.Stat(file)
    if err != nil {
func getFile(files misc.StringSet, file string, required bool) error {
    if !exists(file) {
        if required {
            return files, errors.New("getFile: File does not exist :" + file)
            return errors.New("getFile: File does not exist :" + file)
        }
        return files, nil
        return nil
    }

    if fileInfo.IsDir() {
        // Recurse over directory contents
        err := filepath.Walk(file, func(path string, f os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            if f.IsDir() {
                return nil
            }
            newFiles, err := getFile(path, required)
            if err != nil {
                return err
            }
            files = append(files, newFiles...)
            return nil
        })
        if err != nil {
            return files, err
        }
    } else {
        files = append(files, file)
    files[file] = false

    // get dependencies for binaries
    if _, err := elf.Open(file); err == nil {
        if binaryDepFiles, err := getBinaryDeps(file); err != nil {
            return files, err
        } else {
            files = append(files, binaryDepFiles...)
        }
    }
    if _, err := elf.Open(file); err != nil {
        // file is not an elf, so don't resolve lib dependencies
        return nil
    }

    files = misc.RemoveDuplicates(files)
    return
    err := getBinaryDeps(files, file)
    if err != nil {
        return err
    }

    return nil
}

func getOskConfFontPath(oskConfPath string) (string, error) {
@@ -338,199 +279,166 @@ func getOskConfFontPath(oskConfPath string) (string, error) {

// Get a list of files and their dependencies related to supporting rootfs full
// disk (d)encryption
func getFdeFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
    confFiles := []string{
        "/etc/osk.conf",
        "/etc/ts.conf",
        "/etc/pointercal",
        "/etc/fb.modes",
        "/etc/directfbrc",
func getFdeFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
    confFiles := misc.StringSet{
        "/etc/osk.conf": false,
        "/etc/ts.conf": false,
        "/etc/pointercal": false,
        "/etc/fb.modes": false,
        "/etc/directfbrc": false,
    }
    // TODO: this shouldn't be false? though some files (pointercal) don't always exist...
    if files, err = getFiles(confFiles, false); err != nil {
        return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
    if err := getFiles(files, confFiles, false); err != nil {
        return err
    }

    // osk-sdl
    oskFiles := []string{
        "/usr/bin/osk-sdl",
        "/sbin/cryptsetup",
        "/usr/lib/libGL.so.1",
    }
    if filelist, err := getFiles(oskFiles, true); err != nil {
        return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
    } else {
        files = append(files, filelist...)
    oskFiles := misc.StringSet{
        "/usr/bin/osk-sdl": false,
        "/sbin/cryptsetup": false,
        "/usr/lib/libGL.so.1": false}
    if err := getFiles(files, oskFiles, true); err != nil {
        return err
    }

    fontFile, err := getOskConfFontPath("/etc/osk.conf")
    if err != nil {
        return nil, fmt.Errorf("getFdeFiles: failed to add file %q: %w", fontFile, err)
        return err
    }
    files = append(files, fontFile)
    files[fontFile] = false

    // Directfb
    dfbFiles := []string{}
    dfbFiles := make(misc.StringSet)
    err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
        if filepath.Ext(path) == ".so" {
            dfbFiles = append(dfbFiles, path)
            dfbFiles[path] = false
        }
        return nil
    })
    if err != nil {
        return nil, fmt.Errorf("getFdeFiles: failed to add file %w", err)
        log.Print("getBinaryDeps: failed to stat file")
        return err
    }
    if filelist, err := getFiles(dfbFiles, true); err != nil {
        return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
    } else {
        files = append(files, filelist...)
    if err := getFiles(files, dfbFiles, true); err != nil {
        return err
    }

    // tslib
    tslibFiles := []string{}
    tslibFiles := make(misc.StringSet)
    err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
        if filepath.Ext(path) == ".so" {
            tslibFiles = append(tslibFiles, path)
            tslibFiles[path] = false
        }
        return nil
    })
    if err != nil {
        return nil, fmt.Errorf("getFdeFiles: failed to add file: %w", err)
        log.Print("getBinaryDeps: failed to stat file")
        return err
    }
    libts, _ := filepath.Glob("/usr/lib/libts*")
    tslibFiles = append(tslibFiles, libts...)
    if filelist, err := getFiles(tslibFiles, true); err != nil {
        return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
    } else {
        files = append(files, filelist...)
    for _, file := range libts {
        tslibFiles[file] = false
    }
    if err = getFiles(files, tslibFiles, true); err != nil {
        return err
    }

    // mesa hw accel
    if devinfo.MesaDriver != "" {
        mesaFiles := []string{
            "/usr/lib/libEGL.so.1",
            "/usr/lib/libGLESv2.so.2",
            "/usr/lib/libgbm.so.1",
            "/usr/lib/libudev.so.1",
            "/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so",
        mesaFiles := misc.StringSet{
            "/usr/lib/libEGL.so.1": false,
            "/usr/lib/libGLESv2.so.2": false,
            "/usr/lib/libgbm.so.1": false,
            "/usr/lib/libudev.so.1": false,
            "/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so": false,
        }
        if filelist, err := getFiles(mesaFiles, true); err != nil {
            return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
        } else {
            files = append(files, filelist...)
        if err := getFiles(files, mesaFiles, true); err != nil {
            return err
        }
    }

    return
    return nil
}

func getHookScripts() (files []string) {
func getHookScripts(files misc.StringSet) {
    scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh")
    files = append(files, scripts...)

    return
    for _, script := range scripts {
        files[script] = false
    }
}

func getInitfsExtraFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
func getInitfsExtraFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
    log.Println("== Generating initramfs extra ==")
    binariesExtra := []string{
        "/lib/libz.so.1",
        "/sbin/btrfs",
        "/sbin/dmsetup",
        "/sbin/e2fsck",
        "/usr/sbin/parted",
        "/usr/sbin/resize2fs",
        "/usr/sbin/resize.f2fs",
    binariesExtra := misc.StringSet{
        "/lib/libz.so.1": false,
        "/sbin/dmsetup": false,
        "/sbin/e2fsck": false,
        "/usr/sbin/parted": false,
        "/usr/sbin/resize2fs": false,
        "/usr/sbin/resize.f2fs": false,
    }
    log.Println("- Including extra binaries")
    if filelist, err := getFiles(binariesExtra, true); err != nil {
        return nil, err
    } else {
        files = append(files, filelist...)
    }

    // Hook files & scripts
    if exists("/etc/postmarketos-mkinitfs/files-extra") {
        log.Println("- Including hook files")
        var hookFiles []string
        hookFiles, err := getHookFiles("/etc/postmarketos-mkinitfs/files-extra")
        if err != nil {
            return nil, err
        }
        if filelist, err := getFiles(hookFiles, true); err != nil {
            return nil, err
        } else {
            files = append(files, filelist...)
        }
    if err := getFiles(files, binariesExtra, true); err != nil {
        return err
    }

    if exists("/usr/bin/osk-sdl") {
        log.Println("- Including FDE support")
        if fdeFiles, err := getFdeFiles(devinfo); err != nil {
            return nil, err
        } else {
            files = append(files, fdeFiles...)
        if err := getFdeFiles(files, devinfo); err != nil {
            return err
        }
    } else {
        log.Println("- *NOT* including FDE support")
    }

    return
    return nil
}

func getInitfsFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
func getInitfsFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
    log.Println("== Generating initramfs ==")
    requiredFiles := []string{
        "/bin/busybox",
        "/bin/sh",
        "/bin/busybox-extras",
        "/usr/sbin/telnetd",
        "/sbin/kpartx",
        "/etc/deviceinfo",
        "/usr/bin/unudhcpd",
    requiredFiles := misc.StringSet{
        "/bin/busybox": false,
        "/bin/sh": false,
        "/bin/busybox-extras": false,
        "/usr/sbin/telnetd": false,
        "/sbin/kpartx": false,
        "/etc/deviceinfo": false,
    }

    // Hook files & scripts
    if exists("/etc/postmarketos-mkinitfs/files") {
        log.Println("- Including hook files")
        if hookFiles, err := getHookFiles("/etc/postmarketos-mkinitfs/files"); err != nil {
            return nil, err
        } else {
            if filelist, err := getFiles(hookFiles, true); err != nil {
                return nil, err
            } else {
                files = append(files, filelist...)
        hookFiles := getHookFiles("/etc/postmarketos-mkinitfs/files")
        if err := getFiles(files, hookFiles, true); err != nil {
            return err
            }
        }
    }

    log.Println("- Including hook scripts")
    hookScripts := getHookScripts()
    files = append(files, hookScripts...)
    getHookScripts(files)

    log.Println("- Including required binaries")
    if filelist, err := getFiles(requiredFiles, true); err != nil {
        return nil, err
    } else {
        files = append(files, filelist...)
    if err := getFiles(files, requiredFiles, true); err != nil {
        return err
    }

    return
    return nil
}

func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []string, err error) {
func getInitfsModules(files misc.StringSet, devinfo deviceinfo.DeviceInfo, kernelVer string) error {
    log.Println("- Including kernel modules")

    modDir := filepath.Join("/lib/modules", kernelVer)
    if !exists(modDir) {
        // dir /lib/modules/<kernel> if kernel built without module support, so just print a message
        log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
        return
        return nil
    }

    // modules.* required by modprobe
    modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
    files = append(files, modprobeFiles...)
    for _, file := range modprobeFiles {
        files[file] = false
    }

    // module name (without extension), or directory (trailing slash is important! globs OK)
    requiredModules := []string{
@@ -548,18 +456,16 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []
            dir = filepath.Join(modDir, dir)
            dirs, _ := filepath.Glob(dir)
            for _, d := range dirs {
                if filelist, err := getModulesInDir(d); err != nil {
                    return nil, fmt.Errorf("getInitfsModules: unable to get modules dir %q: %w", d, err)
                } else {
                    files = append(files, filelist...)
                if err := getModulesInDir(files, d); err != nil {
                    log.Print("Unable to get modules in dir: ", d)
                    return err
                }
            }
        } else if dir == "" {
            // item is a module name
            if filelist, err := getModule(file, modDir); err != nil {
                return nil, fmt.Errorf("getInitfsModules: unable to get module %q: %w", file, err)
            } else {
                files = append(files, filelist...)
            if err := getModule(files, file, modDir); err != nil {
                log.Print("Unable to get module: ", file)
                return err
            }
        } else {
            log.Printf("Unknown module entry: %q", item)
@@ -568,10 +474,9 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []

    // deviceinfo modules
    for _, module := range strings.Fields(devinfo.ModulesInitfs) {
        if filelist, err := getModule(module, modDir); err != nil {
            return nil, fmt.Errorf("getInitfsModules: unable to get modules from deviceinfo: %w", err)
        } else {
            files = append(files, filelist...)
        if err := getModule(files, module, modDir); err != nil {
            log.Print("Unable to get modules from deviceinfo")
            return err
        }
    }

@@ -580,20 +485,20 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []
    for _, modFile := range initfsModFiles {
        f, err := os.Open(modFile)
        if err != nil {
            return nil, fmt.Errorf("getInitfsModules: unable to open mkinitfs modules file %q: %w", modFile, err)
            log.Print("getInitfsModules: unable to open mkinitfs modules file: ", modFile)
            return err
        }
        defer f.Close()
        s := bufio.NewScanner(f)
        for s.Scan() {
            if filelist, err := getModule(s.Text(), modDir); err != nil {
                return nil, fmt.Errorf("getInitfsModules: unable to get module file %q: %w", s.Text(), err)
            } else {
                files = append(files, filelist...)
            if err := getModule(files, s.Text(), modDir); err != nil {
                log.Print("getInitfsModules: unable to get module file: ", s.Text())
                return err
            }
        }
    }

    return
    return nil
}

func getKernelReleaseFile() (string, error) {
@@ -622,50 +527,6 @@ func getKernelVersion() (string, error) {
    return strings.TrimSpace(string(contents)), nil
}

func Copy(srcFile, dstFile string) error {
    out, err := os.Create(dstFile)
    if err != nil {
        return err
    }

    defer out.Close()

    in, err := os.Open(srcFile)
    if err != nil {
        return err
    }
    defer in.Close()

    _, err = io.Copy(out, in)
    if err != nil {
        return err
    }

    return nil
}

func copyUbootFiles(path string, devinfo deviceinfo.DeviceInfo) error {
    if devinfo.UbootBoardname == "" {
        return nil
    }

    srcDir := filepath.Join("/usr/share/u-boot", devinfo.UbootBoardname)
    entries, err := os.ReadDir(srcDir)
    if err != nil {
        return err
    }
    for _, entry := range entries {
        sourcePath := filepath.Join(srcDir, entry.Name())
        destPath := filepath.Join(path, entry.Name())

        if err := Copy(sourcePath, destPath); err != nil {
            return err
        }
    }

    return nil
}

func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
    initfsArchive, err := archive.New()
    if err != nil {
@@ -677,40 +538,18 @@ func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo
        "/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
    }
    for _, dir := range requiredDirs {
        if err := initfsArchive.AddItem(dir, dir); err != nil {
            return err
        }
        initfsArchive.Dirs[dir] = false
    }

    if files, err := getInitfsFiles(devinfo); err != nil {
    if err := getInitfsFiles(initfsArchive.Files, devinfo); err != nil {
        return err
    } else {
        items := make(map[string]string)
        // copy files into a map, where the source(key) and dest(value) are the
        // same
        for _, f := range files {
            items[f] = f
        }
        if err := initfsArchive.AddItems(items); err != nil {
            return err
        }
    }

    if files, err := getInitfsModules(devinfo, kernVer); err != nil {
    if err := getInitfsModules(initfsArchive.Files, devinfo, kernVer); err != nil {
        return err
    } else {
        items := make(map[string]string)
        // copy files into a map, where the source(key) and dest(value) are the
        // same
        for _, f := range files {
            items[f] = f
        }
        if err := initfsArchive.AddItems(items); err != nil {
            return err
        }
    }

    if err := initfsArchive.AddItem("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
    if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
        return err
    }

@@ -719,13 +558,13 @@ func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo
    splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz")
    for _, file := range splashFiles {
        // splash images are expected at /<file>
        if err := initfsArchive.AddItem(file, filepath.Join("/", filepath.Base(file))); err != nil {
        if err := initfsArchive.AddFile(file, filepath.Join("/", filepath.Base(file))); err != nil {
            return err
        }
    }

    // initfs_functions
    if err := initfsArchive.AddItem("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
    if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
        return err
    }

@@ -743,19 +582,8 @@ func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo
        return err
    }

    if files, err := getInitfsExtraFiles(devinfo); err != nil {
    if err := getInitfsExtraFiles(initfsExtraArchive.Files, devinfo); err != nil {
        return err
    } else {

        items := make(map[string]string)
        // copy files into a map, where the source(key) and dest(value) are the
        // same
        for _, f := range files {
            items[f] = f
        }
        if err := initfsExtraArchive.AddItems(items); err != nil {
            return err
        }
    }

    log.Println("- Writing and verifying initramfs-extra archive")
@@ -770,20 +598,20 @@ func stripExts(file string) string {
    return strings.Split(file, ".")[0]
}

func getModulesInDir(modPath string) (files []string, err error) {
    err = filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
func getModulesInDir(files misc.StringSet, modPath string) error {
    err := filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
        // TODO: need to support more extensions?
        if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
            return nil
        }
        files = append(files, path)
        files[path] = false
        return nil
    })
    if err != nil {
        return nil, err
        return err
    }

    return
    return nil
}

// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
@@ -792,33 +620,35 @@ func getModulesInDir(modPath string) (files []string, err error) {
// have been built into the kernel
// TODO: look for it in modules.builtin, and make it fatal if it can't be found
// anywhere
func getModule(modName string, modDir string) (files []string, err error) {
func getModule(files misc.StringSet, modName string, modDir string) error {

    modDep := filepath.Join(modDir, "modules.dep")
    if !exists(modDep) {
        return nil, fmt.Errorf("kernel module.dep not found: %s", modDir)
        log.Fatal("Kernel module.dep not found: ", modDir)
    }

    fd, err := os.Open(modDep)
    if err != nil {
        return nil, fmt.Errorf("unable to open modules.dep: %w", err)
        log.Print("Unable to open modules.dep: ", modDep)
        return err
    }
    defer fd.Close()

    deps, err := getModuleDeps(modName, fd)
    if err != nil {
        return nil, err
        return err
    }

    for _, dep := range deps {
        p := filepath.Join(modDir, dep)
        if !exists(p) {
            return nil, fmt.Errorf("tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p)
            log.Print(fmt.Sprintf("Tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p))
            return err
        }
        files = append(files, p)
        files[p] = false
    }

    return
    return err
}

// Get the canonicalized name for the module as represented in the given modules.dep io.reader
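The main.go hunks above contrast two error-handling styles: one side wraps failures with `fmt.Errorf("...: %w", err)` and leaves the decision to abort to `main()`, the other logs inside the helper (or calls `log.Fatal` directly). A minimal standalone sketch of the wrapped style, and of how a caller can still detect the underlying cause the way `main()` checks `os.ErrNotExist` for `copyUbootFiles`; the function name here is illustrative, not from the project:

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"os"
)

// openRequired wraps the underlying error with %w (the style used by
// getFile/getBinaryDeps on one side of the diff) so callers keep the cause.
func openRequired(path string) (*os.File, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("openRequired: unable to open %q: %w", path, err)
	}
	return f, nil
}

func main() {
	f, err := openRequired("/nonexistent/file")
	if err != nil {
		// errors.Is still sees os.ErrNotExist through the wrapping,
		// mirroring the copyUbootFiles check in main() above.
		if errors.Is(err, os.ErrNotExist) {
			log.Println("skipping, file not present:", err)
			return
		}
		log.Fatal(err)
	}
	defer f.Close()
}
```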
@@ -6,23 +6,19 @@ package archive
import (
    "bytes"
    "compress/flate"
    "fmt"
    "github.com/cavaliercoder/go-cpio"
    "github.com/klauspost/pgzip"
    "gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
    "io"
    "log"
    "os"
    "path/filepath"
    "sort"
    "strings"
    "sync"
    "syscall"

    "github.com/cavaliercoder/go-cpio"
    "github.com/klauspost/pgzip"
    "gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
)

type Archive struct {
    items archiveItems
    Dirs misc.StringSet
    Files misc.StringSet
    cpioWriter *cpio.Writer
    buf *bytes.Buffer
}
@@ -31,162 +27,83 @@ func New() (*Archive, error) {
    buf := new(bytes.Buffer)
    archive := &Archive{
        cpioWriter: cpio.NewWriter(buf),
        Files: make(misc.StringSet),
        Dirs: make(misc.StringSet),
        buf: buf,
    }

    return archive, nil
}

type archiveItem struct {
    sourcePath string
    header *cpio.Header
}

type archiveItems struct {
    items []archiveItem
    sync.RWMutex
}

// Adds the given item to the archiveItems, only if it doesn't already exist in
// the list. The items are kept sorted in ascending order.
func (a *archiveItems) Add(item archiveItem) {
    a.Lock()
    defer a.Unlock()

    if len(a.items) < 1 {
        // empty list
        a.items = append(a.items, item)
        return
    }

    // find existing item, or index of where new item should go
    i := sort.Search(len(a.items), func(i int) bool {
        return strings.Compare(item.header.Name, a.items[i].header.Name) <= 0
    })

    if i >= len(a.items) {
        // doesn't exist in list, but would be at the very end
        a.items = append(a.items, item)
        return
    }

    if strings.Compare(a.items[i].header.Name, item.header.Name) == 0 {
        // already in list
        return
    }

    // grow list by 1, shift right at index, and insert new string at index
    a.items = append(a.items, archiveItem{})
    copy(a.items[i+1:], a.items[i:])
    a.items[i] = item
}

// iterate through items and send each one over the returned channel
func (a *archiveItems) IterItems() <-chan archiveItem {
    ch := make(chan archiveItem)
    go func() {
        a.RLock()
        defer a.RUnlock()

        for _, item := range a.items {
            ch <- item
        }
        close(ch)
    }()
    return ch
}

func (archive *Archive) Write(path string, mode os.FileMode) error {
    if err := archive.writeCpio(); err != nil {
        return err
    }

    if err := archive.cpioWriter.Close(); err != nil {
        return fmt.Errorf("archive.Write: error closing archive: %w", err)
        return err
    }

    // Write archive to path
    if err := archive.writeCompressed(path, mode); err != nil {
        return fmt.Errorf("unable to write archive to location %q: %w", path, err)
        log.Print("Unable to write archive to location: ", path)
        return err
    }

    if err := os.Chmod(path, mode); err != nil {
        return fmt.Errorf("unable to chmod %q to %s: %w", path, mode, err)
    }

    return nil
}

// Adds the given items in the map to the archive. The map format is {source path:dest path}.
// Internally this just calls AddItem on each key,value pair in the map.
func (archive *Archive) AddItems(paths map[string]string) error {
    for s, d := range paths {
        if err := archive.AddItem(s, d); err != nil {
            return err
        }
    }

    return nil
}

// Adds the given file or directory at "source" to the archive at "dest"
func (archive *Archive) AddItem(source string, dest string) error {

    sourceStat, err := os.Lstat(source)
    if err != nil {
        e, ok := err.(*os.PathError)
        if e.Err == syscall.ENOENT && ok {
            // doesn't exist in current filesystem, assume it's a new directory
            return archive.addDir(dest)
        }
        return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
    }

    if sourceStat.Mode()&os.ModeDir != 0 {
        return archive.addDir(dest)
    }

    return archive.addFile(source, dest)
}

func (archive *Archive) addFile(source string, dest string) error {
func (archive *Archive) AddFile(file string, dest string) error {
    if err := archive.addDir(filepath.Dir(dest)); err != nil {
        return err
    }

    sourceStat, err := os.Lstat(source)
    if archive.Files[file] {
        // Already written to cpio
        return nil
    }

    fileStat, err := os.Lstat(file)
    if err != nil {
        log.Print("addFile: failed to stat file: ", source)
        log.Print("AddFile: failed to stat file: ", file)
        return err
    }

    // Symlink: write symlink to archive then set 'file' to link target
    if sourceStat.Mode()&os.ModeSymlink != 0 {
    if fileStat.Mode()&os.ModeSymlink != 0 {
        // log.Printf("File %q is a symlink", file)
        target, err := os.Readlink(source)
        target, err := os.Readlink(file)
        if err != nil {
            log.Print("addFile: failed to get symlink target: ", source)
            log.Print("AddFile: failed to get symlink target: ", file)
            return err
        }

        destFilename := strings.TrimPrefix(dest, "/")

        archive.items.Add(archiveItem{
            sourcePath: source,
            header: &cpio.Header{
        hdr := &cpio.Header{
            Name: destFilename,
            Linkname: target,
            Mode: 0644 | cpio.ModeSymlink,
            Size: int64(len(target)),
            // Checksum: 1,
            },
        })
    }
        if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
            return err
        }
        if _, err = archive.cpioWriter.Write([]byte(target)); err != nil {
            return err
        }

        archive.Files[file] = true
        if filepath.Dir(target) == "." {
            target = filepath.Join(filepath.Dir(source), target)
            target = filepath.Join(filepath.Dir(file), target)
        }
        // make sure target is an absolute path
        if !filepath.IsAbs(target) {
            target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
            target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
            if err != nil {
                return err
            }
@@ -194,21 +111,34 @@ func (archive *Archive) addFile(source string, dest string) error {
        // TODO: add verbose mode, print stuff like this:
        // log.Printf("symlink: %q, target: %q", file, target)
        // write symlink target
        err = archive.addFile(target, target)
        err = archive.AddFile(target, target)
        return err
    }

    destFilename := strings.TrimPrefix(dest, "/")
    // log.Printf("writing file: %q", file)

    archive.items.Add(archiveItem{
        sourcePath: source,
        header: &cpio.Header{
    fd, err := os.Open(file)
    if err != nil {
        return err
    }
    defer fd.Close()

    destFilename := strings.TrimPrefix(dest, "/")
    hdr := &cpio.Header{
        Name: destFilename,
        Mode: cpio.FileMode(sourceStat.Mode().Perm()),
        Size: sourceStat.Size(),
        Mode: cpio.FileMode(fileStat.Mode().Perm()),
        Size: fileStat.Size(),
        // Checksum: 1,
        },
    })
    }
    if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
        return err
    }

    if _, err = io.Copy(archive.cpioWriter, fd); err != nil {
        return err
    }

    archive.Files[file] = true

    return nil
}
@@ -246,48 +176,29 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) error {
}

func (archive *Archive) writeCpio() error {
    // having a transient function for actually adding files to the archive
    // allows the deferred fd.close to run after every copy and prevent having
    // tons of open file handles until the copying is all done
    copyToArchive := func(source string, header *cpio.Header) error {

        if err := archive.cpioWriter.WriteHeader(header); err != nil {
            return fmt.Errorf("archive.writeCpio: unable to write header: %w", err)
    // Write any dirs added explicitly
    for dir := range archive.Dirs {
        archive.addDir(dir)
    }

        // don't copy actual dirs into the archive, writing the header is enough
        if !header.Mode.IsDir() {
            if header.Mode.IsRegular() {
                fd, err := os.Open(source)
                if err != nil {
                    return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
    // Write files and any missing parent dirs
    for file, imported := range archive.Files {
        if imported {
            continue
        }
                defer fd.Close()
                if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
                    return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
                }
            } else if header.Linkname != "" {
                // the contents of a symlink is just need the link name
                if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
                    return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
                }
            } else {
                return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
            }
        }

        return nil
    }

    for i := range archive.items.IterItems() {
        if err := copyToArchive(i.sourcePath, i.header); err != nil {
        if err := archive.AddFile(file, file); err != nil {
            return err
        }
    }

    return nil
}

func (archive *Archive) addDir(dir string) error {
    if archive.Dirs[dir] {
        // Already imported
        return nil
    }
    if dir == "/" {
        dir = "."
    }
@@ -295,13 +206,19 @@ func (archive *Archive) addDir(dir string) error {
    subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
    for i, subdir := range subdirs {
        path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
        archive.items.Add(archiveItem{
            sourcePath: path,
            header: &cpio.Header{
        if archive.Dirs[path] {
            // Subdir already imported
            continue
        }
        err := archive.cpioWriter.WriteHeader(&cpio.Header{
            Name: path,
            Mode: cpio.ModeDir | 0755,
            },
        })
        if err != nil {
            return err
        }
        archive.Dirs[path] = true
        // log.Print("wrote dir: ", path)
    }

    return nil
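One side of the archive.go diff keeps entries in a sorted, deduplicated slice via `sort.Search` (see `archiveItems.Add` above). A small standalone sketch of that insert technique on plain strings; the names here are illustrative only:

```go
package main

import (
	"fmt"
	"sort"
)

// insertSorted inserts s into the ascending slice items, skipping duplicates,
// using the same sort.Search pattern as archiveItems.Add in the diff above.
func insertSorted(items []string, s string) []string {
	i := sort.Search(len(items), func(i int) bool { return items[i] >= s })
	if i < len(items) && items[i] == s {
		return items // already present
	}
	items = append(items, "")    // grow by one
	copy(items[i+1:], items[i:]) // shift the tail right
	items[i] = s                 // place the new element at its sorted position
	return items
}

func main() {
	items := []string{"/bazz/bar", "/foo", "/foo/bar1"}
	items = insertSorted(items, "/foo/bar0")
	items = insertSorted(items, "/foo") // duplicate, ignored
	fmt.Println(items)                  // [/bazz/bar /foo /foo/bar0 /foo/bar1]
}
```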
@@ -1,189 +0,0 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later

package archive

import (
    "reflect"
    "testing"

    "github.com/cavaliercoder/go-cpio"
)

func TestArchiveItemsAdd(t *testing.T) {
    subtests := []struct {
        name string
        inItems []archiveItem
        inItem archiveItem
        expected []archiveItem
    }{
        {
            name: "empty list",
            inItems: []archiveItem{},
            inItem: archiveItem{
                sourcePath: "/foo/bar",
                header: &cpio.Header{Name: "/foo/bar"},
            },
            expected: []archiveItem{
                {
                    sourcePath: "/foo/bar",
                    header: &cpio.Header{Name: "/foo/bar"},
                },
            },
        },
        {
            name: "already exists",
            inItems: []archiveItem{
                {
                    sourcePath: "/bazz/bar",
                    header: &cpio.Header{Name: "/bazz/bar"},
                },
                {
                    sourcePath: "/foo",
                    header: &cpio.Header{Name: "/foo"},
                },
                {
                    sourcePath: "/foo/bar",
                    header: &cpio.Header{Name: "/foo/bar"},
                },
            },
            inItem: archiveItem{
                sourcePath: "/foo",
                header: &cpio.Header{Name: "/foo"},
            },
            expected: []archiveItem{
                {
                    sourcePath: "/bazz/bar",
                    header: &cpio.Header{Name: "/bazz/bar"},
                },
                {
                    sourcePath: "/foo",
                    header: &cpio.Header{Name: "/foo"},
                },
                {
                    sourcePath: "/foo/bar",
                    header: &cpio.Header{Name: "/foo/bar"},
                },
            },
        },
        {
            name: "add new",
            inItems: []archiveItem{
                {
                    sourcePath: "/bazz/bar",
                    header: &cpio.Header{Name: "/bazz/bar"},
                },
                {
                    sourcePath: "/foo",
                    header: &cpio.Header{Name: "/foo"},
                },
                {
                    sourcePath: "/foo/bar",
                    header: &cpio.Header{Name: "/foo/bar"},
                },
                {
                    sourcePath: "/foo/bar1",
                    header: &cpio.Header{Name: "/foo/bar1"},
                },
            },
            inItem: archiveItem{
                sourcePath: "/foo/bar0",
                header: &cpio.Header{Name: "/foo/bar0"},
            },
            expected: []archiveItem{
                {
                    sourcePath: "/bazz/bar",
                    header: &cpio.Header{Name: "/bazz/bar"},
                },
                {
                    sourcePath: "/foo",
                    header: &cpio.Header{Name: "/foo"},
                },
                {
                    sourcePath: "/foo/bar",
                    header: &cpio.Header{Name: "/foo/bar"},
                },
                {
                    sourcePath: "/foo/bar0",
                    header: &cpio.Header{Name: "/foo/bar0"},
                },
                {
                    sourcePath: "/foo/bar1",
                    header: &cpio.Header{Name: "/foo/bar1"},
                },
            },
        },
        {
            name: "add new at beginning",
            inItems: []archiveItem{
                {
                    sourcePath: "/foo",
                    header: &cpio.Header{Name: "/foo"},
                },
                {
                    sourcePath: "/foo/bar",
                    header: &cpio.Header{Name: "/foo/bar"},
                },
            },
            inItem: archiveItem{
                sourcePath: "/bazz/bar",
                header: &cpio.Header{Name: "/bazz/bar"},
            },
            expected: []archiveItem{
                {
                    sourcePath: "/bazz/bar",
                    header: &cpio.Header{Name: "/bazz/bar"},
                },
                {
                    sourcePath: "/foo",
                    header: &cpio.Header{Name: "/foo"},
                },
                {
                    sourcePath: "/foo/bar",
                    header: &cpio.Header{Name: "/foo/bar"},
                },
            },
        },
        {
            name: "add new at end",
            inItems: []archiveItem{
                {
                    sourcePath: "/bazz/bar",
                    header: &cpio.Header{Name: "/bazz/bar"},
                },
                {
                    sourcePath: "/foo",
                    header: &cpio.Header{Name: "/foo"},
                },
            },
            inItem: archiveItem{
                sourcePath: "/zzz/bazz",
                header: &cpio.Header{Name: "/zzz/bazz"},
            },
            expected: []archiveItem{
                {
                    sourcePath: "/bazz/bar",
                    header: &cpio.Header{Name: "/bazz/bar"},
                },
                {
                    sourcePath: "/foo",
                    header: &cpio.Header{Name: "/foo"},
                },
                {
                    sourcePath: "/zzz/bazz",
                    header: &cpio.Header{Name: "/zzz/bazz"},
                },
            },
        },
    }

    for _, st := range subtests {
        t.Run(st.name, func(t *testing.T) {
            a := archiveItems{items: st.inItems}
            a.Add(st.inItem)
            if !reflect.DeepEqual(st.expected, a.items) {
                t.Fatal("expected:", st.expected, " got: ", a.items)
            }
        })
    }
}
@@ -16,7 +16,6 @@ import (
type DeviceInfo struct {
    AppendDtb string
    Arch string
    UbootBoardname string
    BootimgAppendSEAndroidEnforce string
    BootimgBlobpack string
    BootimgDtbSecond string
@@ -120,10 +119,7 @@ func nameToField(name string) string {
        if p == "deviceinfo" {
            continue
        }
        if len(p) < 1 {
            continue
        }
        field = field + strings.ToUpper(p[:1]) + p[1:]
        field = field + strings.Title(p)
    }

    return field
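The nameToField hunk above swaps `strings.Title(p)` (deprecated in newer Go releases) for manual first-letter capitalization. A simplified standalone sketch of that conversion, assuming the deviceinfo key is split on underscores (the split itself is not shown in the hunk):

```go
package main

import (
	"fmt"
	"strings"
)

// nameToField-style conversion as in the hunk above: drop the "deviceinfo"
// prefix and empty parts, then upper-case the first letter of each remaining
// part by hand instead of calling the deprecated strings.Title.
func nameToField(name string) string {
	var field string
	for _, p := range strings.Split(name, "_") {
		if p == "deviceinfo" || len(p) < 1 {
			continue
		}
		field = field + strings.ToUpper(p[:1]) + p[1:]
	}
	return field
}

func main() {
	fmt.Println(nameToField("deviceinfo_modules_initfs"))  // ModulesInitfs
	fmt.Println(nameToField("deviceinfo_uboot_boardname")) // UbootBoardname
}
```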
@@ -1,4 +1,4 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later

package misc
@@ -10,6 +10,8 @@ import (
    "path/filepath"
)

type StringSet map[string]bool

// Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
// absolute path
func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
@@ -46,31 +48,3 @@ func FreeSpace(path string) (uint64, error) {
    size := stat.Bavail * uint64(stat.Bsize)
    return size, nil
}

// Merge the contents of "b" into "a", overwriting any previously existing keys
// in "a"
func Merge(a map[string]string, b map[string]string) {
    for k, v := range b {
        a[k] = v
    }
}

// Removes duplicate entries from the given string slice and returns a slice
// with the unique values
func RemoveDuplicates(in []string) (out []string) {
    // use a map to "remove" duplicates. the value in the map is totally
    // irrelevant
    outMap := make(map[string]bool)
    for _, s := range in {
        if ok := outMap[s]; !ok {
            outMap[s] = true
        }
    }

    out = make([]string, 0, len(outMap))
    for k := range outMap {
        out = append(out, k)
    }

    return
}
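The misc hunk above adds `type StringSet map[string]bool`, which the archive code uses both as a de-duplicating set of paths and as a "written yet?" flag. A small standalone sketch of that usage pattern (the paths and loop are illustrative, not taken from the project):

```go
package main

import "fmt"

// StringSet as introduced in pkgs/misc above: keys are paths, the bool marks
// whether the entry has already been written out (false = still pending).
type StringSet map[string]bool

func main() {
	files := make(StringSet)

	// Adding the same path twice leaves a single entry, which is why the
	// diff drops RemoveDuplicates in favor of this type on one side.
	files["/bin/busybox"] = false
	files["/bin/busybox"] = false
	files["/etc/deviceinfo"] = false

	// Consumers flip the value to true once the entry is handled, as
	// Archive.AddFile does after writing a file to the cpio archive.
	for f, done := range files {
		if !done {
			files[f] = true
		}
	}
	fmt.Println(len(files), files["/bin/busybox"]) // 2 true
}
```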
@@ -1,125 +0,0 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later

package misc

import (
    "reflect"
    "sort"
    "testing"
)

func TestMerge(t *testing.T) {
    subtests := []struct {
        name string
        inA map[string]string
        inB map[string]string
        expected map[string]string
    }{
        {
            name: "empty B",
            inA: map[string]string{
                "foo": "bar",
                "banana": "airplane",
            },
            inB: map[string]string{},
            expected: map[string]string{
                "foo": "bar",
                "banana": "airplane",
            },
        },
        {
            name: "empty A",
            inA: map[string]string{},
            inB: map[string]string{
                "foo": "bar",
                "banana": "airplane",
            },
            expected: map[string]string{
                "foo": "bar",
                "banana": "airplane",
            },
        },
        {
            name: "both populated, some duplicates",
            inA: map[string]string{
                "bar": "bazz",
                "banana": "yellow",
                "guava": "green",
            },
            inB: map[string]string{
                "foo": "bar",
                "banana": "airplane",
            },
            expected: map[string]string{
                "foo": "bar",
                "guava": "green",
                "banana": "airplane",
                "bar": "bazz",
            },
        },
    }

    for _, st := range subtests {
        t.Run(st.name, func(t *testing.T) {
            out := st.inA
            Merge(out, st.inB)
            if !reflect.DeepEqual(st.expected, out) {
                t.Fatalf("expected: %q, got: %q\n", st.expected, out)
            }
        })
    }
}

func TestRemoveDuplicates(t *testing.T) {
    subtests := []struct {
        name string
        in []string
        expected []string
    }{
        {
            name: "no duplicates",
            in: []string{
                "foo",
                "bar",
                "banana",
                "airplane",
            },
            expected: []string{
                "foo",
                "bar",
                "banana",
                "airplane",
            },
        },
        {
            name: "all duplicates",
            in: []string{
                "foo",
                "foo",
                "foo",
                "foo",
            },
            expected: []string{
                "foo",
            },
        },
        {
            name: "empty",
            in: []string{},
            expected: []string{},
        },
    }

    for _, st := range subtests {
        t.Run(st.name, func(t *testing.T) {
            // note: sorting to make comparison easier later
            sort.Strings(st.expected)
            out := RemoveDuplicates(st.in)
            sort.Strings(out)
            if !reflect.DeepEqual(st.expected, out) {
                t.Fatalf("expected: %q, got: %q\n", st.expected, out)
            }
        })
    }
}