Compare commits

33 commits between feature/te... and 1.5.1:

2761535e12, 1a72589f6f, df0b5d66d7, c5f1cffca5, 7eed20e35f, e71cab485d,
568fe7f717, d78c6d5a62, c774b610d4, 1e00f8f1cc, 28eed4fd12, c9ac9d9dd6,
a4927a8915, 029bdd849d, 8d21ae79c0, 4278763cdb, a6165b3a8c, 0eacd26615,
e926bb301c, 961c455d59, 4f601087e1, 8b18e444a3, 62c52e749e, 463ff1a7e4,
3787944141, 584a8e4e2a, cdf41938b0, 3d02037e3a, 6e2b4af336, 9843f8a9c3,
4b8a0a0d18, 338c89504f, c07eafd087
.ci/check_linting.sh (deleted)
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-echo "### Running gofmt..."
-files="$(gofmt -l .)"
-
-if [ ! -z "$files" ]; then
-	# run gofmt to print out the diff of what needs to be changed
-	gofmt -d -e .
-	exit 1
-fi
-
-echo "### Running staticcheck..."
-staticcheck ./...
.gitlab-ci.yml
@@ -3,6 +3,9 @@
 # global settings
 image: alpine:edge
 
+variables:
+  GOFLAGS: "-buildvcs=false"
+
 stages:
   - lint
   - build
@@ -21,25 +24,13 @@ stages:
     - merge_requests
     - tags
 
-# device documentation
-gofmt linting:
-  stage: lint
-  allow_failure: true
-  <<: *only-default
-  before_script:
-  # specific mirror used because staticcheck hasn't made it to the other mirrors yet...
-  - apk -q update --repository http://dl-4.alpinelinux.org/alpine/edge/testing
-  - apk -q add --repository http://dl-4.alpinelinux.org/alpine/edge/testing go staticcheck
-  script:
-  - .ci/check_linting.sh
-
 build:
   stage: build
   <<: *only-default
   before_script:
-  - apk -q add go
+  - apk -q add go staticcheck make
   script:
-  - go build -v
-  - go test ./...
+  - make test
+  - make
   artifacts:
     expire_in: 1 week
Makefile (new file, 54 lines)
@@ -0,0 +1,54 @@
.POSIX:
.SUFFIXES:

PREFIX?=/usr/local
BINDIR?=$(PREFIX)/sbin
SHAREDIR?=$(PREFIX)/share
GO?=go
GOFLAGS?=
LDFLAGS+=-s -w
RM?=rm -f
GOTEST=go test -count=1 -race

GOSRC!=find * -name '*.go'
GOSRC+=go.mod go.sum

all: postmarketos-mkinitfs

postmarketos-mkinitfs: $(GOSRC)
	$(GO) build $(GOFLAGS) -ldflags "$(LDFLAGS)" -o postmarketos-mkinitfs

.PHONY: fmt
fmt:
	gofmt -w .

test:
	@if [ `gofmt -l . | wc -l` -ne 0 ]; then \
		gofmt -d .; \
		echo "ERROR: source files need reformatting with gofmt"; \
		exit 1; \
	fi
	@staticcheck ./...

	@$(GOTEST) ./...

clean:
	$(RM) postmarketos-mkinitfs

install: $(DOCS) postmarketos-mkinitfs
	install -Dm755 postmarketos-mkinitfs -t $(DESTDIR)$(BINDIR)/
	ln -sf postmarketos-mkinitfs $(DESTDIR)$(BINDIR)/mkinitfs

.PHONY: checkinstall
checkinstall:
	test -e $(DESTDIR)$(BINDIR)/postmarketos-mkinitfs
	test -L $(DESTDIR)$(BINDIR)/mkinitfs

RMDIR_IF_EMPTY:=sh -c '! [ -d $$0 ] || ls -1qA $$0 | grep -q . || rmdir $$0'

uninstall:
	$(RM) $(DESTDIR)$(BINDIR)/postmarketos-mkinitfs
	$(RM) $(DESTDIR)$(BINDIR)/mkinitfs
	${RMDIR_IF_EMPTY} $(DESTDIR)$(BINDIR)

.PHONY: all clean install uninstall test
README.md (new file, 45 lines)
@@ -0,0 +1,45 @@
`postmarketos-mkinitfs` is a tool for generating an initramfs (and installing
it) on postmarketOS.

## Building

Building this project requires a Go compiler/toolchain and `make`:

```
$ make
```

To install locally:

```
$ make install
```

The installation prefix can be set in the generally accepted way by setting
`PREFIX`:

```
$ make PREFIX=/some/location
# make PREFIX=/some/location install
```

Other paths can be modified from the command line as well; see the top section of
the `Makefile` for more information.

Tests (functional and linting) can be executed by using the `test` make target:

```
$ make test
```

## Usage

The application uses configuration from `/etc/deviceinfo`, and does not support
any other options at runtime. It can be run simply by executing:

```
$ postmarketos-mkinitfs
```

For historical reasons, a symlink from `mkinitfs` to `postmarketos-mkinitfs` is
also installed by the makefile's `install` target.
main.go
@@ -1,4 +1,4 @@
-// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
+// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 package main
@@ -10,7 +10,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"os"
 	"os/exec"
@@ -54,7 +53,7 @@ func main() {
 	}
 
 	// temporary working dir
-	workDir, err := ioutil.TempDir("", "mkinitfs")
+	workDir, err := os.MkdirTemp("", "mkinitfs")
 	if err != nil {
 		log.Fatal("Unable to create temporary work directory:", err)
 	}
@@ -71,6 +70,14 @@ func main() {
 		log.Fatal("generateInitfsExtra: ", err)
 	}
 
+	if err := copyUbootFiles(workDir, devinfo); errors.Is(err, os.ErrNotExist) {
+		log.Println("u-boot files copying skipped: ", err)
+	} else {
+		if err != nil {
+			log.Fatal("copyUbootFiles: ", err)
+		}
+	}
+
 	// Final processing of initramfs / kernel is done by boot-deploy
 	if err := bootDeploy(workDir, *outDir); err != nil {
 		log.Fatal("bootDeploy: ", err)
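The copyUbootFiles call added above treats a missing `/usr/share/u-boot/<board>` directory as a skip rather than a failure by checking the returned error with `errors.Is(err, os.ErrNotExist)`. A minimal standalone sketch (not part of this changeset) of why that check matches:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	// os.ReadDir on a missing directory returns a *fs.PathError that wraps the
	// "not exist" sentinel, so errors.Is matches it without string comparison.
	_, err := os.ReadDir("/usr/share/u-boot/no-such-board")
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true when the directory is missing
}
```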
@@ -141,71 +148,74 @@ func exists(file string) bool {
 	return false
 }
 
-func getHookFiles(filesdir string) misc.StringSet {
-	fileInfo, err := ioutil.ReadDir(filesdir)
+func getHookFiles(filesdir string) (files []string, err error) {
+	fileInfo, err := os.ReadDir(filesdir)
 	if err != nil {
-		log.Fatal(err)
+		return nil, fmt.Errorf("getHookFiles: unable to read hook file dir: %w", err)
 	}
-	files := make(misc.StringSet)
 	for _, file := range fileInfo {
 		path := filepath.Join(filesdir, file.Name())
 		f, err := os.Open(path)
 		if err != nil {
-			log.Fatal(err)
+			return nil, fmt.Errorf("getHookFiles: unable to open hook file: %w", err)
 		}
 		defer f.Close()
+		log.Printf("-- Including files from: %s\n", path)
 		s := bufio.NewScanner(f)
 		for s.Scan() {
-			if err := getFile(files, s.Text(), true); err != nil {
-				log.Fatalf("Unable to find file %q required by %q", s.Text(), path)
+			if filelist, err := getFiles([]string{s.Text()}, true); err != nil {
+				return nil, fmt.Errorf("getHookFiles: unable to find file %q required by %q", s.Text(), path)
+			} else {
+				files = append(files, filelist...)
 			}
 		}
 		if err := s.Err(); err != nil {
-			log.Fatal(err)
+			return nil, fmt.Errorf("getHookFiles: uname to process hook file %q: %w", path, err)
 		}
 	}
-	return files
+	return files, nil
 }
 
 // Recursively list all dependencies for a given ELF binary
-func getBinaryDeps(files misc.StringSet, file string) error {
+func getBinaryDeps(file string) (files []string, err error) {
 	// if file is a symlink, resolve dependencies for target
 	fileStat, err := os.Lstat(file)
 	if err != nil {
-		log.Print("getBinaryDeps: failed to stat file")
-		return err
+		return nil, fmt.Errorf("getBinaryDeps: failed to stat file %q: %w", file, err)
 	}
 
 	// Symlink: write symlink to archive then set 'file' to link target
 	if fileStat.Mode()&os.ModeSymlink != 0 {
 		target, err := os.Readlink(file)
 		if err != nil {
-			log.Print("getBinaryDeps: unable to read symlink: ", file)
-			return err
+			return nil, fmt.Errorf("getBinaryDeps: unable to read symlink %q: %w", file, err)
 		}
 		if !filepath.IsAbs(target) {
 			target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
 			if err != nil {
-				return err
+				return files, err
 			}
 		}
-		if err := getBinaryDeps(files, target); err != nil {
-			return err
+		binaryDepFiles, err := getBinaryDeps(target)
+		if err != nil {
+			return files, err
 		}
-		return err
+		files = append(files, binaryDepFiles...)
+		return files, err
 	}
 
 	// get dependencies for binaries
 	fd, err := elf.Open(file)
 	if err != nil {
-		log.Fatal(err)
+		return nil, fmt.Errorf("getBinaryDeps: unable to open elf binary %q: %w", file, err)
 	}
 	libs, _ := fd.ImportedLibraries()
 	fd.Close()
-	files[file] = false
+	files = append(files, file)
 
 	if len(libs) == 0 {
-		return err
+		return files, err
 	}
 
 	libdirs := []string{"/usr/lib", "/lib"}
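getBinaryDeps above resolves shared-library dependencies with the standard library's debug/elf package. A self-contained sketch (not project code) of the call it is built around; the binary path is only an example:

```go
package main

import (
	"debug/elf"
	"fmt"
	"log"
)

func main() {
	f, err := elf.Open("/bin/busybox") // any ELF binary
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// DT_NEEDED entries; getBinaryDeps looks these up under /usr/lib and /lib
	// and recurses into each one.
	libs, err := f.ImportedLibraries()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(libs)
}
```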
@@ -214,51 +224,60 @@ func getBinaryDeps(files misc.StringSet, file string) error {
 		for _, libdir := range libdirs {
 			path := filepath.Join(libdir, lib)
 			if _, err := os.Stat(path); err == nil {
-				err := getBinaryDeps(files, path)
+				binaryDepFiles, err := getBinaryDeps(path)
 				if err != nil {
-					return err
+					return files, err
 				}
-				files[path] = false
+				files = append(files, binaryDepFiles...)
+				files = append(files, path)
 				found = true
 				break
 			}
 		}
 		if !found {
-			log.Fatalf("Unable to locate dependency for %q: %s", file, lib)
+			return nil, fmt.Errorf("getBinaryDeps: unable to locate dependency for %q: %s", file, lib)
 		}
 	}
 
-	return nil
+	return
 }
 
-func getFiles(files misc.StringSet, newFiles misc.StringSet, required bool) error {
-	for file := range newFiles {
-		err := getFile(files, file, required)
+func getFiles(list []string, required bool) (files []string, err error) {
+	for _, file := range list {
+		filelist, err := getFile(file, required)
 		if err != nil {
-			return err
+			return nil, err
 		}
+		files = append(files, filelist...)
 	}
-	return nil
+
+	files = misc.RemoveDuplicates(files)
+	return
 }
 
-func getFile(files misc.StringSet, file string, required bool) error {
+func getFile(file string, required bool) (files []string, err error) {
 	// Expand glob expression
-	expanded, _ := filepath.Glob(file)
+	expanded, err := filepath.Glob(file)
+	if err != nil {
+		return
+	}
 	if len(expanded) > 0 && expanded[0] != file {
 		for _, path := range expanded {
-			if err := getFile(files, path, required); err != nil {
-				return err
+			if globFiles, err := getFile(path, required); err != nil {
+				return files, err
+			} else {
+				files = append(files, globFiles...)
 			}
 		}
-		return nil
+		return misc.RemoveDuplicates(files), nil
 	}
 
 	fileInfo, err := os.Stat(file)
 	if err != nil {
 		if required {
-			return errors.New("getFile: File does not exist :" + file)
+			return files, errors.New("getFile: File does not exist :" + file)
 		}
-		return nil
+		return files, nil
 	}
 
 	if fileInfo.IsDir() {
@@ -270,27 +289,31 @@ func getFile(files misc.StringSet, file string, required bool) error {
 			if f.IsDir() {
 				return nil
 			}
-			return getFile(files, path, required)
+			newFiles, err := getFile(path, required)
+			if err != nil {
+				return err
+			}
+			files = append(files, newFiles...)
+			return nil
 		})
 		if err != nil {
-			return err
+			return files, err
 		}
-		return nil
-	}
+	} else {
+		files = append(files, file)
 
-	files[file] = false
-	// get dependencies for binaries
-	if _, err := elf.Open(file); err != nil {
-		// file is not an elf, so don't resolve lib dependencies
-		return nil
-	}
+		// get dependencies for binaries
+		if _, err := elf.Open(file); err == nil {
+			if binaryDepFiles, err := getBinaryDeps(file); err != nil {
+				return files, err
+			} else {
+				files = append(files, binaryDepFiles...)
+			}
+		}
+	}
 
-	if err := getBinaryDeps(files, file); err != nil {
-		return err
-	}
-
-	return nil
+	files = misc.RemoveDuplicates(files)
+	return
 }
 
 func getOskConfFontPath(oskConfPath string) (string, error) {
@@ -317,167 +340,199 @@ func getOskConfFontPath(oskConfPath string) (string, error) {
 
 // Get a list of files and their dependencies related to supporting rootfs full
 // disk (d)encryption
-func getFdeFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
-	confFiles := misc.StringSet{
-		"/etc/osk.conf": false,
-		"/etc/ts.conf": false,
-		"/etc/pointercal": false,
-		"/etc/fb.modes": false,
-		"/etc/directfbrc": false,
+func getFdeFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
+	confFiles := []string{
+		"/etc/osk.conf",
+		"/etc/ts.conf",
+		"/etc/pointercal",
+		"/etc/fb.modes",
+		"/etc/directfbrc",
 	}
 	// TODO: this shouldn't be false? though some files (pointercal) don't always exist...
-	if err := getFiles(files, confFiles, false); err != nil {
-		return err
+	if files, err = getFiles(confFiles, false); err != nil {
+		return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
 	}
 
 	// osk-sdl
-	oskFiles := misc.StringSet{
-		"/usr/bin/osk-sdl": false,
-		"/sbin/cryptsetup": false,
-		"/usr/lib/libGL.so.1": false}
-	if err := getFiles(files, oskFiles, true); err != nil {
-		return err
+	oskFiles := []string{
+		"/usr/bin/osk-sdl",
+		"/sbin/cryptsetup",
+		"/usr/lib/libGL.so.1",
+	}
+	if filelist, err := getFiles(oskFiles, true); err != nil {
+		return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
+	} else {
+		files = append(files, filelist...)
 	}
 
 	fontFile, err := getOskConfFontPath("/etc/osk.conf")
 	if err != nil {
-		return err
+		return nil, fmt.Errorf("getFdeFiles: failed to add file %q: %w", fontFile, err)
 	}
-	files[fontFile] = false
+	files = append(files, fontFile)
 
 	// Directfb
-	dfbFiles := make(misc.StringSet)
+	dfbFiles := []string{}
 	err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
 		if filepath.Ext(path) == ".so" {
-			dfbFiles[path] = false
+			dfbFiles = append(dfbFiles, path)
 		}
 		return nil
 	})
 	if err != nil {
-		log.Print("getBinaryDeps: failed to stat file")
-		return err
+		return nil, fmt.Errorf("getFdeFiles: failed to add file %w", err)
 	}
-	if err := getFiles(files, dfbFiles, true); err != nil {
-		return err
+	if filelist, err := getFiles(dfbFiles, true); err != nil {
+		return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
+	} else {
+		files = append(files, filelist...)
 	}
 
 	// tslib
-	tslibFiles := make(misc.StringSet)
+	tslibFiles := []string{}
 	err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
 		if filepath.Ext(path) == ".so" {
-			tslibFiles[path] = false
+			tslibFiles = append(tslibFiles, path)
 		}
 		return nil
 	})
 	if err != nil {
-		log.Print("getBinaryDeps: failed to stat file")
-		return err
+		return nil, fmt.Errorf("getFdeFiles: failed to add file: %w", err)
 	}
 	libts, _ := filepath.Glob("/usr/lib/libts*")
-	for _, file := range libts {
-		tslibFiles[file] = false
-	}
-	if err = getFiles(files, tslibFiles, true); err != nil {
-		return err
+	tslibFiles = append(tslibFiles, libts...)
+	if filelist, err := getFiles(tslibFiles, true); err != nil {
+		return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
+	} else {
+		files = append(files, filelist...)
 	}
 
 	// mesa hw accel
 	if devinfo.MesaDriver != "" {
-		mesaFiles := misc.StringSet{
-			"/usr/lib/libEGL.so.1": false,
-			"/usr/lib/libGLESv2.so.2": false,
-			"/usr/lib/libgbm.so.1": false,
-			"/usr/lib/libudev.so.1": false,
-			"/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so": false,
+		mesaFiles := []string{
+			"/usr/lib/libEGL.so.1",
+			"/usr/lib/libGLESv2.so.2",
+			"/usr/lib/libgbm.so.1",
+			"/usr/lib/libudev.so.1",
+			"/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so",
 		}
-		if err := getFiles(files, mesaFiles, true); err != nil {
-			return err
+		if filelist, err := getFiles(mesaFiles, true); err != nil {
+			return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
+		} else {
+			files = append(files, filelist...)
 		}
 	}
 
-	return nil
+	return
 }
 
-func getHookScripts(files misc.StringSet) {
+func getHookScripts() (files []string) {
 	scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh")
-	for _, script := range scripts {
-		files[script] = false
-	}
+	files = append(files, scripts...)
+	return
 }
 
-func getInitfsExtraFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
+func getInitfsExtraFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
 	log.Println("== Generating initramfs extra ==")
-	binariesExtra := misc.StringSet{
-		"/lib/libz.so.1": false,
-		"/sbin/dmsetup": false,
-		"/sbin/e2fsck": false,
-		"/usr/sbin/parted": false,
-		"/usr/sbin/resize2fs": false,
-		"/usr/sbin/resize.f2fs": false,
+	binariesExtra := []string{
+		"/lib/libz.so.1",
+		"/sbin/btrfs",
+		"/sbin/dmsetup",
+		"/sbin/e2fsck",
+		"/usr/sbin/parted",
+		"/usr/sbin/resize2fs",
+		"/usr/sbin/resize.f2fs",
 	}
 	log.Println("- Including extra binaries")
-	if err := getFiles(files, binariesExtra, true); err != nil {
-		return err
+	if filelist, err := getFiles(binariesExtra, true); err != nil {
+		return nil, err
+	} else {
+		files = append(files, filelist...)
+	}
+
+	// Hook files & scripts
+	if exists("/etc/postmarketos-mkinitfs/files-extra") {
+		log.Println("- Including hook files")
+		var hookFiles []string
+		hookFiles, err := getHookFiles("/etc/postmarketos-mkinitfs/files-extra")
+		if err != nil {
+			return nil, err
+		}
+		if filelist, err := getFiles(hookFiles, true); err != nil {
+			return nil, err
+		} else {
+			files = append(files, filelist...)
+		}
 	}
 
 	if exists("/usr/bin/osk-sdl") {
 		log.Println("- Including FDE support")
-		if err := getFdeFiles(files, devinfo); err != nil {
-			return err
+		if fdeFiles, err := getFdeFiles(devinfo); err != nil {
+			return nil, err
+		} else {
+			files = append(files, fdeFiles...)
 		}
 	} else {
 		log.Println("- *NOT* including FDE support")
 	}
 
-	return nil
+	return
 }
 
-func getInitfsFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
+func getInitfsFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
 	log.Println("== Generating initramfs ==")
-	requiredFiles := misc.StringSet{
-		"/bin/busybox": false,
-		"/bin/sh": false,
-		"/bin/busybox-extras": false,
-		"/usr/sbin/telnetd": false,
-		"/sbin/kpartx": false,
-		"/etc/deviceinfo": false,
-		"/usr/bin/unudhcpd": false,
+	requiredFiles := []string{
+		"/bin/busybox",
+		"/bin/sh",
+		"/bin/busybox-extras",
+		"/usr/sbin/telnetd",
+		"/usr/sbin/kpartx",
+		"/etc/deviceinfo",
+		"/usr/bin/unudhcpd",
 	}
 
 	// Hook files & scripts
 	if exists("/etc/postmarketos-mkinitfs/files") {
 		log.Println("- Including hook files")
-		hookFiles := getHookFiles("/etc/postmarketos-mkinitfs/files")
-		if err := getFiles(files, hookFiles, true); err != nil {
-			return err
+		if hookFiles, err := getHookFiles("/etc/postmarketos-mkinitfs/files"); err != nil {
+			return nil, err
+		} else {
+			if filelist, err := getFiles(hookFiles, true); err != nil {
+				return nil, err
+			} else {
+				files = append(files, filelist...)
+			}
 		}
 	}
 
 	log.Println("- Including hook scripts")
-	getHookScripts(files)
+	hookScripts := getHookScripts()
+	files = append(files, hookScripts...)
 
 	log.Println("- Including required binaries")
-	if err := getFiles(files, requiredFiles, true); err != nil {
-		return err
+	if filelist, err := getFiles(requiredFiles, true); err != nil {
+		return nil, err
+	} else {
+		files = append(files, filelist...)
 	}
 
-	return nil
+	return
 }
 
-func getInitfsModules(files misc.StringSet, devinfo deviceinfo.DeviceInfo, kernelVer string) error {
+func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []string, err error) {
 	log.Println("- Including kernel modules")
 
 	modDir := filepath.Join("/lib/modules", kernelVer)
 	if !exists(modDir) {
 		// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
 		log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
-		return nil
+		return
 	}
 
 	// modules.* required by modprobe
 	modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
-	for _, file := range modprobeFiles {
-		files[file] = false
-	}
+	files = append(files, modprobeFiles...)
 
 	// module name (without extension), or directory (trailing slash is important! globs OK)
 	requiredModules := []string{
@@ -495,16 +550,18 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []string, err error) {
 			dir = filepath.Join(modDir, dir)
 			dirs, _ := filepath.Glob(dir)
 			for _, d := range dirs {
-				if err := getModulesInDir(files, d); err != nil {
-					log.Print("Unable to get modules in dir: ", d)
-					return err
+				if filelist, err := getModulesInDir(d); err != nil {
+					return nil, fmt.Errorf("getInitfsModules: unable to get modules dir %q: %w", d, err)
+				} else {
+					files = append(files, filelist...)
 				}
 			}
 		} else if dir == "" {
 			// item is a module name
-			if err := getModule(files, file, modDir); err != nil {
-				log.Print("Unable to get module: ", file)
-				return err
+			if filelist, err := getModule(file, modDir); err != nil {
+				return nil, fmt.Errorf("getInitfsModules: unable to get module %q: %w", file, err)
+			} else {
+				files = append(files, filelist...)
 			}
 		} else {
 			log.Printf("Unknown module entry: %q", item)
@@ -513,9 +570,10 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []string, err error) {
 
 	// deviceinfo modules
 	for _, module := range strings.Fields(devinfo.ModulesInitfs) {
-		if err := getModule(files, module, modDir); err != nil {
-			log.Print("Unable to get modules from deviceinfo")
-			return err
+		if filelist, err := getModule(module, modDir); err != nil {
+			return nil, fmt.Errorf("getInitfsModules: unable to get modules from deviceinfo: %w", err)
+		} else {
+			files = append(files, filelist...)
 		}
 	}
 
@@ -524,20 +582,20 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []string, err error) {
 	for _, modFile := range initfsModFiles {
 		f, err := os.Open(modFile)
 		if err != nil {
-			log.Print("getInitfsModules: unable to open mkinitfs modules file: ", modFile)
-			return err
+			return nil, fmt.Errorf("getInitfsModules: unable to open mkinitfs modules file %q: %w", modFile, err)
 		}
 		defer f.Close()
 		s := bufio.NewScanner(f)
 		for s.Scan() {
-			if err := getModule(files, s.Text(), modDir); err != nil {
-				log.Print("getInitfsModules: unable to get module file: ", s.Text())
-				return err
+			if filelist, err := getModule(s.Text(), modDir); err != nil {
+				return nil, fmt.Errorf("getInitfsModules: unable to get module file %q: %w", s.Text(), err)
+			} else {
+				files = append(files, filelist...)
 			}
 		}
 	}
 
-	return nil
+	return
 }
 
 func getKernelReleaseFile() (string, error) {
@@ -566,6 +624,50 @@ func getKernelVersion() (string, error) {
 	return strings.TrimSpace(string(contents)), nil
 }
 
+func Copy(srcFile, dstFile string) error {
+	out, err := os.Create(dstFile)
+	if err != nil {
+		return err
+	}
+
+	defer out.Close()
+
+	in, err := os.Open(srcFile)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+
+	_, err = io.Copy(out, in)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func copyUbootFiles(path string, devinfo deviceinfo.DeviceInfo) error {
+	if devinfo.UbootBoardname == "" {
+		return nil
+	}
+
+	srcDir := filepath.Join("/usr/share/u-boot", devinfo.UbootBoardname)
+	entries, err := os.ReadDir(srcDir)
+	if err != nil {
+		return err
+	}
+	for _, entry := range entries {
+		sourcePath := filepath.Join(srcDir, entry.Name())
+		destPath := filepath.Join(path, entry.Name())
+
+		if err := Copy(sourcePath, destPath); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
 	initfsArchive, err := archive.New()
 	if err != nil {
@@ -577,18 +679,40 @@ func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
 		"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
 	}
 	for _, dir := range requiredDirs {
-		initfsArchive.Dirs[dir] = false
+		if err := initfsArchive.AddItem(dir, dir); err != nil {
+			return err
+		}
 	}
 
-	if err := getInitfsFiles(initfsArchive.Files, devinfo); err != nil {
+	if files, err := getInitfsFiles(devinfo); err != nil {
 		return err
+	} else {
+		items := make(map[string]string)
+		// copy files into a map, where the source(key) and dest(value) are the
+		// same
+		for _, f := range files {
+			items[f] = f
+		}
+		if err := initfsArchive.AddItems(items); err != nil {
+			return err
+		}
 	}
 
-	if err := getInitfsModules(initfsArchive.Files, devinfo, kernVer); err != nil {
+	if files, err := getInitfsModules(devinfo, kernVer); err != nil {
 		return err
+	} else {
+		items := make(map[string]string)
+		// copy files into a map, where the source(key) and dest(value) are the
+		// same
+		for _, f := range files {
+			items[f] = f
+		}
+		if err := initfsArchive.AddItems(items); err != nil {
+			return err
+		}
 	}
 
-	if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
+	if err := initfsArchive.AddItem("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
 		return err
 	}
 
@@ -597,13 +721,13 @@ func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
 	splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz")
 	for _, file := range splashFiles {
 		// splash images are expected at /<file>
-		if err := initfsArchive.AddFile(file, filepath.Join("/", filepath.Base(file))); err != nil {
+		if err := initfsArchive.AddItem(file, filepath.Join("/", filepath.Base(file))); err != nil {
 			return err
 		}
 	}
 
 	// initfs_functions
-	if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
+	if err := initfsArchive.AddItem("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
 		return err
 	}
 
@@ -621,8 +745,19 @@ func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo) error {
 		return err
 	}
 
-	if err := getInitfsExtraFiles(initfsExtraArchive.Files, devinfo); err != nil {
+	if files, err := getInitfsExtraFiles(devinfo); err != nil {
 		return err
+	} else {
+
+		items := make(map[string]string)
+		// copy files into a map, where the source(key) and dest(value) are the
+		// same
+		for _, f := range files {
+			items[f] = f
+		}
+		if err := initfsExtraArchive.AddItems(items); err != nil {
+			return err
+		}
 	}
 
 	log.Println("- Writing and verifying initramfs-extra archive")
@@ -637,20 +772,20 @@ func stripExts(file string) string {
 	return strings.Split(file, ".")[0]
 }
 
-func getModulesInDir(files misc.StringSet, modPath string) error {
-	err := filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
+func getModulesInDir(modPath string) (files []string, err error) {
+	err = filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
 		// TODO: need to support more extensions?
 		if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
 			return nil
 		}
-		files[path] = false
+		files = append(files, path)
 		return nil
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	return nil
+	return
 }
 
 // Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
@@ -659,35 +794,33 @@ func getModulesInDir(files misc.StringSet, modPath string) error {
 // have been built into the kernel
 // TODO: look for it in modules.builtin, and make it fatal if it can't be found
 // anywhere
-func getModule(files misc.StringSet, modName string, modDir string) error {
+func getModule(modName string, modDir string) (files []string, err error) {
 
 	modDep := filepath.Join(modDir, "modules.dep")
 	if !exists(modDep) {
-		log.Fatal("Kernel module.dep not found: ", modDir)
+		return nil, fmt.Errorf("kernel module.dep not found: %s", modDir)
 	}
 
 	fd, err := os.Open(modDep)
 	if err != nil {
-		log.Print("Unable to open modules.dep: ", modDep)
-		return err
+		return nil, fmt.Errorf("unable to open modules.dep: %w", err)
 	}
 	defer fd.Close()
 
 	deps, err := getModuleDeps(modName, fd)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	for _, dep := range deps {
 		p := filepath.Join(modDir, dep)
 		if !exists(p) {
-			log.Print(fmt.Sprintf("Tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p))
-			return err
+			return nil, fmt.Errorf("tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p)
 		}
-		files[p] = false
+		files = append(files, p)
 	}
 
-	return err
+	return
 }
 
 // Get the canonicalized name for the module as represented in the given modules.dep io.reader
pkgs/archive/archive.go
@@ -6,19 +6,23 @@ package archive
 import (
 	"bytes"
 	"compress/flate"
-	"github.com/cavaliercoder/go-cpio"
-	"github.com/klauspost/pgzip"
-	"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
+	"fmt"
 	"io"
 	"log"
 	"os"
 	"path/filepath"
+	"sort"
 	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/cavaliercoder/go-cpio"
+	"github.com/klauspost/pgzip"
+	"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
 )
 
 type Archive struct {
-	Dirs misc.StringSet
-	Files misc.StringSet
+	items archiveItems
 	cpioWriter *cpio.Writer
 	buf *bytes.Buffer
 }
@@ -27,83 +31,162 @@ func New() (*Archive, error) {
 	buf := new(bytes.Buffer)
 	archive := &Archive{
 		cpioWriter: cpio.NewWriter(buf),
-		Files: make(misc.StringSet),
-		Dirs: make(misc.StringSet),
 		buf: buf,
 	}
 
 	return archive, nil
 }
 
+type archiveItem struct {
+	sourcePath string
+	header     *cpio.Header
+}
+
+type archiveItems struct {
+	items []archiveItem
+	sync.RWMutex
+}
+
+// Adds the given item to the archiveItems, only if it doesn't already exist in
+// the list. The items are kept sorted in ascending order.
+func (a *archiveItems) Add(item archiveItem) {
+	a.Lock()
+	defer a.Unlock()
+
+	if len(a.items) < 1 {
+		// empty list
+		a.items = append(a.items, item)
+		return
+	}
+
+	// find existing item, or index of where new item should go
+	i := sort.Search(len(a.items), func(i int) bool {
+		return strings.Compare(item.header.Name, a.items[i].header.Name) <= 0
+	})
+
+	if i >= len(a.items) {
+		// doesn't exist in list, but would be at the very end
+		a.items = append(a.items, item)
+		return
+	}
+
+	if strings.Compare(a.items[i].header.Name, item.header.Name) == 0 {
+		// already in list
+		return
+	}
+
+	// grow list by 1, shift right at index, and insert new string at index
+	a.items = append(a.items, archiveItem{})
+	copy(a.items[i+1:], a.items[i:])
+	a.items[i] = item
+}
+
+// iterate through items and send each one over the returned channel
+func (a *archiveItems) IterItems() <-chan archiveItem {
+	ch := make(chan archiveItem)
+	go func() {
+		a.RLock()
+		defer a.RUnlock()
+
+		for _, item := range a.items {
+			ch <- item
+		}
+		close(ch)
+	}()
+	return ch
+}
+
 func (archive *Archive) Write(path string, mode os.FileMode) error {
 	if err := archive.writeCpio(); err != nil {
 		return err
 	}
 
 	if err := archive.cpioWriter.Close(); err != nil {
-		return err
+		return fmt.Errorf("archive.Write: error closing archive: %w", err)
 	}
 
 	// Write archive to path
 	if err := archive.writeCompressed(path, mode); err != nil {
-		log.Print("Unable to write archive to location: ", path)
-		return err
+		return fmt.Errorf("unable to write archive to location %q: %w", path, err)
 	}
 
 	if err := os.Chmod(path, mode); err != nil {
-		return err
+		return fmt.Errorf("unable to chmod %q to %s: %w", path, mode, err)
 	}
 
 	return nil
 }
 
-func (archive *Archive) AddFile(file string, dest string) error {
+// Adds the given items in the map to the archive. The map format is {source path:dest path}.
+// Internally this just calls AddItem on each key,value pair in the map.
+func (archive *Archive) AddItems(paths map[string]string) error {
+	for s, d := range paths {
+		if err := archive.AddItem(s, d); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Adds the given file or directory at "source" to the archive at "dest"
+func (archive *Archive) AddItem(source string, dest string) error {
+
+	sourceStat, err := os.Lstat(source)
+	if err != nil {
+		e, ok := err.(*os.PathError)
+		if e.Err == syscall.ENOENT && ok {
+			// doesn't exist in current filesystem, assume it's a new directory
+			return archive.addDir(dest)
+		}
+		return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
+	}
+
+	if sourceStat.Mode()&os.ModeDir != 0 {
+		return archive.addDir(dest)
+	}
+
+	return archive.addFile(source, dest)
+}
+
+func (archive *Archive) addFile(source string, dest string) error {
 	if err := archive.addDir(filepath.Dir(dest)); err != nil {
 		return err
 	}
 
-	if archive.Files[file] {
-		// Already written to cpio
-		return nil
-	}
-
-	fileStat, err := os.Lstat(file)
+	sourceStat, err := os.Lstat(source)
 	if err != nil {
-		log.Print("AddFile: failed to stat file: ", file)
+		log.Print("addFile: failed to stat file: ", source)
 		return err
 	}
 
 	// Symlink: write symlink to archive then set 'file' to link target
-	if fileStat.Mode()&os.ModeSymlink != 0 {
+	if sourceStat.Mode()&os.ModeSymlink != 0 {
 		// log.Printf("File %q is a symlink", file)
-		target, err := os.Readlink(file)
+		target, err := os.Readlink(source)
 		if err != nil {
-			log.Print("AddFile: failed to get symlink target: ", file)
+			log.Print("addFile: failed to get symlink target: ", source)
 			return err
 		}
 
 		destFilename := strings.TrimPrefix(dest, "/")
-		hdr := &cpio.Header{
-			Name: destFilename,
-			Linkname: target,
-			Mode: 0644 | cpio.ModeSymlink,
-			Size: int64(len(target)),
-			// Checksum: 1,
-		}
-		if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
-			return err
-		}
-		if _, err = archive.cpioWriter.Write([]byte(target)); err != nil {
-			return err
-		}
-
-		archive.Files[file] = true
+		archive.items.Add(archiveItem{
+			sourcePath: source,
+			header: &cpio.Header{
+				Name: destFilename,
+				Linkname: target,
+				Mode: 0644 | cpio.ModeSymlink,
+				Size: int64(len(target)),
+				// Checksum: 1,
+			},
+		})
 
 		if filepath.Dir(target) == "." {
-			target = filepath.Join(filepath.Dir(file), target)
+			target = filepath.Join(filepath.Dir(source), target)
 		}
 		// make sure target is an absolute path
 		if !filepath.IsAbs(target) {
-			target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
+			target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
@@ -111,34 +194,21 @@ func (archive *Archive) AddFile(file string, dest string) error {
 	// TODO: add verbose mode, print stuff like this:
 	// log.Printf("symlink: %q, target: %q", file, target)
 	// write symlink target
-	err = archive.AddFile(target, target)
+	err = archive.addFile(target, target)
 	return err
 	}
 
-	// log.Printf("writing file: %q", file)
-
-	fd, err := os.Open(file)
-	if err != nil {
-		return err
-	}
-	defer fd.Close()
-
 	destFilename := strings.TrimPrefix(dest, "/")
-	hdr := &cpio.Header{
-		Name: destFilename,
-		Mode: cpio.FileMode(fileStat.Mode().Perm()),
-		Size: fileStat.Size(),
-		// Checksum: 1,
-	}
-	if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
-		return err
-	}
-
-	if _, err = io.Copy(archive.cpioWriter, fd); err != nil {
-		return err
-	}
-
-	archive.Files[file] = true
+	archive.items.Add(archiveItem{
+		sourcePath: source,
+		header: &cpio.Header{
+			Name: destFilename,
+			Mode: cpio.FileMode(sourceStat.Mode().Perm()),
+			Size: sourceStat.Size(),
+			// Checksum: 1,
+		},
+	})
 
 	return nil
 }
@@ -176,29 +246,48 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) error {
 }
 
 func (archive *Archive) writeCpio() error {
-	// Write any dirs added explicitly
-	for dir := range archive.Dirs {
-		archive.addDir(dir)
+	// having a transient function for actually adding files to the archive
+	// allows the deferred fd.close to run after every copy and prevent having
+	// tons of open file handles until the copying is all done
+	copyToArchive := func(source string, header *cpio.Header) error {
+
+		if err := archive.cpioWriter.WriteHeader(header); err != nil {
+			return fmt.Errorf("archive.writeCpio: unable to write header: %w", err)
+		}
+
+		// don't copy actual dirs into the archive, writing the header is enough
+		if !header.Mode.IsDir() {
+			if header.Mode.IsRegular() {
+				fd, err := os.Open(source)
+				if err != nil {
+					return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
+				}
+				defer fd.Close()
+				if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
+					return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
+				}
+			} else if header.Linkname != "" {
+				// the contents of a symlink is just need the link name
+				if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
+					return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
+				}
+			} else {
+				return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
+			}
+		}
+
+		return nil
 	}
 
-	// Write files and any missing parent dirs
-	for file, imported := range archive.Files {
-		if imported {
-			continue
-		}
-		if err := archive.AddFile(file, file); err != nil {
+	for i := range archive.items.IterItems() {
+		if err := copyToArchive(i.sourcePath, i.header); err != nil {
 			return err
 		}
 	}
 
 	return nil
 }
 
 func (archive *Archive) addDir(dir string) error {
-	if archive.Dirs[dir] {
-		// Already imported
-		return nil
-	}
 	if dir == "/" {
 		dir = "."
 	}
@@ -206,19 +295,13 @@ func (archive *Archive) addDir(dir string) error {
 	subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
 	for i, subdir := range subdirs {
 		path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
-		if archive.Dirs[path] {
-			// Subdir already imported
-			continue
-		}
-		err := archive.cpioWriter.WriteHeader(&cpio.Header{
-			Name: path,
-			Mode: cpio.ModeDir | 0755,
-		})
-		if err != nil {
-			return err
-		}
-		archive.Dirs[path] = true
-		// log.Print("wrote dir: ", path)
+		archive.items.Add(archiveItem{
+			sourcePath: path,
+			header: &cpio.Header{
+				Name: path,
+				Mode: cpio.ModeDir | 0755,
+			},
+		})
 	}
 
 	return nil
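A hypothetical caller of the reworked archive API above (not part of this changeset): items are queued with AddItem/AddItems, kept sorted and deduplicated by destination name, and only written out when Write is called. The import path is assumed from the test file's location under pkgs/archive.

```go
package main

import (
	"log"

	"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/archive"
)

func main() {
	a, err := archive.New()
	if err != nil {
		log.Fatal(err)
	}
	// Queue a directory and a couple of files; sources and destinations are the same here.
	if err := a.AddItem("/etc", "/etc"); err != nil {
		log.Fatal(err)
	}
	if err := a.AddItems(map[string]string{
		"/bin/busybox":    "/bin/busybox",
		"/etc/deviceinfo": "/etc/deviceinfo",
	}); err != nil {
		log.Fatal(err)
	}
	// The cpio archive is assembled and compressed only at Write time.
	if err := a.Write("/tmp/initramfs", 0644); err != nil {
		log.Fatal(err)
	}
}
```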
189
pkgs/archive/archive_test.go
Normal file
189
pkgs/archive/archive_test.go
Normal file
@@ -0,0 +1,189 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later

package archive

import (
	"reflect"
	"testing"

	"github.com/cavaliercoder/go-cpio"
)

func TestArchiveItemsAdd(t *testing.T) {
	subtests := []struct {
		name     string
		inItems  []archiveItem
		inItem   archiveItem
		expected []archiveItem
	}{
		{
			name:    "empty list",
			inItems: []archiveItem{},
			inItem: archiveItem{
				sourcePath: "/foo/bar",
				header:     &cpio.Header{Name: "/foo/bar"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			name: "already exists",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/foo",
				header:     &cpio.Header{Name: "/foo"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			name: "add new",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
				{
					sourcePath: "/foo/bar1",
					header:     &cpio.Header{Name: "/foo/bar1"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/foo/bar0",
				header:     &cpio.Header{Name: "/foo/bar0"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
				{
					sourcePath: "/foo/bar0",
					header:     &cpio.Header{Name: "/foo/bar0"},
				},
				{
					sourcePath: "/foo/bar1",
					header:     &cpio.Header{Name: "/foo/bar1"},
				},
			},
		},
		{
			name: "add new at beginning",
			inItems: []archiveItem{
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/bazz/bar",
				header:     &cpio.Header{Name: "/bazz/bar"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			name: "add new at end",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/zzz/bazz",
				header:     &cpio.Header{Name: "/zzz/bazz"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/zzz/bazz",
					header:     &cpio.Header{Name: "/zzz/bazz"},
				},
			},
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			a := archiveItems{items: st.inItems}
			a.Add(st.inItem)
			if !reflect.DeepEqual(st.expected, a.items) {
				t.Fatal("expected:", st.expected, " got: ", a.items)
			}
		})
	}
}
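These tests pin down how archiveItems.Add is expected to behave: the item list stays sorted by sourcePath, and adding a path that is already present is a no-op. The Add implementation itself is not part of this view; purely as an illustration, a minimal version that satisfies the cases above could use sort.Search to find the insertion point (the method shape is assumed, and "sort" would need to be imported):

// Illustrative sketch only; the real method in pkgs/archive may differ in
// detail (for example, locking for concurrent use).
func (a *archiveItems) Add(item archiveItem) {
	i := sort.Search(len(a.items), func(i int) bool {
		return a.items[i].sourcePath >= item.sourcePath
	})
	if i < len(a.items) && a.items[i].sourcePath == item.sourcePath {
		// path already present, keep the existing entry ("already exists" case)
		return
	}
	// grow by one and shift the tail right so the slice stays sorted
	a.items = append(a.items, archiveItem{})
	copy(a.items[i+1:], a.items[i:])
	a.items[i] = item
}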
@@ -16,6 +16,7 @@ import (
 type DeviceInfo struct {
 	AppendDtb string
 	Arch string
+	UbootBoardname string
 	BootimgAppendSEAndroidEnforce string
 	BootimgBlobpack string
 	BootimgDtbSecond string
@@ -119,7 +120,10 @@ func nameToField(name string) string {
 		if p == "deviceinfo" {
 			continue
 		}
-		field = field + strings.Title(p)
+		if len(p) < 1 {
+			continue
+		}
+		field = field + strings.ToUpper(p[:1]) + p[1:]
 	}
 
 	return field
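The second hunk replaces strings.Title, which is deprecated in recent Go releases, with manual upper-casing of the first byte of each non-empty part, plus a guard for empty parts. As a self-contained sketch of the conversion (splitting on "_" and the surrounding nameToField body are assumptions here; only the loop body is taken from the hunk), a deviceinfo variable name maps to a struct field name like this:

package main

import (
	"fmt"
	"strings"
)

// nameToField sketch: converts a deviceinfo variable name such as
// "deviceinfo_append_dtb" into the matching struct field name "AppendDtb".
func nameToField(name string) string {
	var field string
	parts := strings.Split(name, "_")
	for _, p := range parts {
		if p == "deviceinfo" {
			continue
		}
		if len(p) < 1 {
			continue
		}
		field = field + strings.ToUpper(p[:1]) + p[1:]
	}

	return field
}

func main() {
	fmt.Println(nameToField("deviceinfo_append_dtb")) // AppendDtb
	fmt.Println(nameToField("deviceinfo_arch"))       // Arch
}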
@@ -1,4 +1,4 @@
-// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
+// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 package misc
@@ -10,8 +10,6 @@ import (
 	"path/filepath"
 )
 
-type StringSet map[string]bool
-
 // Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
 // absolute path
 func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
@@ -48,3 +46,31 @@ func FreeSpace(path string) (uint64, error) {
 	size := stat.Bavail * uint64(stat.Bsize)
 	return size, nil
 }
+
+// Merge the contents of "b" into "a", overwriting any previously existing keys
+// in "a"
+func Merge(a map[string]string, b map[string]string) {
+	for k, v := range b {
+		a[k] = v
+	}
+}
+
+// Removes duplicate entries from the given string slice and returns a slice
+// with the unique values
+func RemoveDuplicates(in []string) (out []string) {
+	// use a map to "remove" duplicates. the value in the map is totally
+	// irrelevant
+	outMap := make(map[string]bool)
+	for _, s := range in {
+		if ok := outMap[s]; !ok {
+			outMap[s] = true
+		}
+	}
+
+	out = make([]string, 0, len(outMap))
+	for k := range outMap {
+		out = append(out, k)
+	}
+
+	return
+}
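The two new helpers are small, but a quick usage sketch shows the intended semantics: Merge mutates its first argument in place, and RemoveDuplicates returns the unique values in no particular order (which is why the tests below sort before comparing). The import path and example values here are assumptions for illustration only:

package main

import (
	"fmt"

	// import path assumed for illustration; the helpers live under pkgs/misc
	"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
)

func main() {
	// Merge overwrites existing keys in the first map with values from the second.
	conf := map[string]string{"arch": "aarch64", "append_dtb": "false"}
	misc.Merge(conf, map[string]string{"append_dtb": "true"})
	fmt.Println(conf["append_dtb"]) // true

	// RemoveDuplicates keeps one copy of each value; ordering is unspecified.
	mods := misc.RemoveDuplicates([]string{"ext4", "usb_f_rndis", "ext4"})
	fmt.Println(len(mods)) // 2
}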
125
pkgs/misc/misc_test.go
Normal file
@@ -0,0 +1,125 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later

package misc

import (
	"reflect"
	"sort"
	"testing"
)

func TestMerge(t *testing.T) {
	subtests := []struct {
		name     string
		inA      map[string]string
		inB      map[string]string
		expected map[string]string
	}{
		{
			name: "empty B",
			inA: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
			inB: map[string]string{},
			expected: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
		},
		{
			name: "empty A",
			inA:  map[string]string{},
			inB: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
			expected: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
		},
		{
			name: "both populated, some duplicates",
			inA: map[string]string{
				"bar":    "bazz",
				"banana": "yellow",
				"guava":  "green",
			},
			inB: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
			expected: map[string]string{
				"foo":    "bar",
				"guava":  "green",
				"banana": "airplane",
				"bar":    "bazz",
			},
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			out := st.inA
			Merge(out, st.inB)
			if !reflect.DeepEqual(st.expected, out) {
				t.Fatalf("expected: %q, got: %q\n", st.expected, out)
			}
		})
	}
}

func TestRemoveDuplicates(t *testing.T) {
	subtests := []struct {
		name     string
		in       []string
		expected []string
	}{
		{
			name: "no duplicates",
			in: []string{
				"foo",
				"bar",
				"banana",
				"airplane",
			},
			expected: []string{
				"foo",
				"bar",
				"banana",
				"airplane",
			},
		},
		{
			name: "all duplicates",
			in: []string{
				"foo",
				"foo",
				"foo",
				"foo",
			},
			expected: []string{
				"foo",
			},
		},
		{
			name:     "empty",
			in:       []string{},
			expected: []string{},
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			// note: sorting to make comparison easier later
			sort.Strings(st.expected)
			out := RemoveDuplicates(st.in)
			sort.Strings(out)
			if !reflect.DeepEqual(st.expected, out) {
				t.Fatalf("expected: %q, got: %q\n", st.expected, out)
			}
		})
	}
}