Compare commits
197 Commits
Author | SHA1 | Date | |
---|---|---|---|
8f1ff5c374 | |||
|
15c95f6b13 | ||
|
bab4be1a89 | ||
|
1428f27b4a | ||
|
f6e4773507 | ||
|
7a07a16ecb | ||
|
4f6af31a7a | ||
|
39ee6752fd | ||
|
0edee0afbd | ||
|
95edf678f4 | ||
|
be6a6da417 | ||
|
4e771ab96f | ||
|
4d7dd79bcf | ||
|
d63e600614 | ||
|
741c0553d5 | ||
|
cd97df108a | ||
|
1fed057a82 | ||
|
5efdb9f170 | ||
|
81de8b438d | ||
|
af9a0f0ca5 | ||
|
014563fdbc | ||
|
83282187c2 | ||
|
eda4f3ba22 | ||
|
866d37b85d | ||
|
1334fdfa26 | ||
|
56db822b88 | ||
|
631d6078c2 | ||
|
e5f14d70a6 | ||
|
dd5cdeace5 | ||
|
1a99953aa2 | ||
|
e2f4e6254f | ||
|
2efeb4510d | ||
|
f0b3c1d992 | ||
|
98bdb23f01 | ||
|
6618e564ad | ||
|
6df75d5682 | ||
|
9475572811 | ||
|
2b467eb77f | ||
|
d77e1cd11d | ||
|
2ec78bfcfc | ||
|
fedf55b573 | ||
|
30681d2f0a | ||
|
74de5f9798 | ||
|
2f4937c52d | ||
|
b1e44d8ec2 | ||
|
c87b926a53 | ||
|
b2cdfe9da4 | ||
|
a15c02f3aa | ||
|
0054fde90d | ||
|
dceef20121 | ||
|
25017f3a3b | ||
|
67ce1a9c2e | ||
|
8fac3004a6 | ||
|
a15a3ad781 | ||
|
1e8580a0a1 | ||
|
e6ee43826d | ||
|
7bdd68800d | ||
|
80098d29c6 | ||
|
67f1839ddc | ||
|
baf76ed614 | ||
|
27e271b904 | ||
|
1ac85b12fe | ||
|
f7f42bc2d4 | ||
|
c62a1f9ddb | ||
|
c9de619f98 | ||
|
a519769979 | ||
|
128a48dd24 | ||
|
499136e83a | ||
|
78f8fa32fb | ||
|
d03257981f | ||
|
307fb1889f | ||
|
fa3d3268d7 | ||
|
8b67848d5c | ||
|
31ab72edbc | ||
|
bd239c0365 | ||
|
a4c3b9ff96 | ||
|
8f505ffdc8 | ||
|
fb00e9e94b | ||
|
7c2377d0c8 | ||
|
f24d0139c9 | ||
|
5e2f975bd3 | ||
|
786e09d855 | ||
|
ba1e1a77db | ||
|
fd11f4a627 | ||
|
322d6bb754 | ||
|
1f4d8737e8 | ||
|
52fc741ba8 | ||
|
31b7eb34ee | ||
|
4e97990804 | ||
|
c01b48ad25 | ||
|
6aec4d564c | ||
|
6eb01e91e6 | ||
|
790cf47060 | ||
|
4074eada55 | ||
|
a7c4fe83ce | ||
|
06f86aadc9 | ||
|
d87a33a751 | ||
|
d1e150242d | ||
|
5968622f60 | ||
|
0179a0ca5c | ||
|
33c61b3c94 | ||
|
e4fb6cef70 | ||
|
4ae678d8ce | ||
|
71c2a87d56 | ||
|
9bb326be91 | ||
|
0545d68b1d | ||
|
c6e79551f4 | ||
|
a9f4281fbd | ||
|
bb50041257 | ||
|
09c897e737 | ||
|
a8bb10ce9c | ||
|
5e65ace958 | ||
|
cbcd4408e3 | ||
|
ad560591e1 | ||
|
89f1e067da | ||
|
4259478755 | ||
|
347668caa3 | ||
|
b0e28b4215 | ||
|
c1d96f699c | ||
|
25c3c03e24 | ||
|
07c8c711c7 | ||
|
e772fe0c87 | ||
|
6f05222018 | ||
|
c23af8b541 | ||
|
bd09de9232 | ||
|
22692e48d2 | ||
|
6c2f7b972b | ||
|
e5002f5750 | ||
|
662f559286 | ||
|
a4be663e13 | ||
|
14873015c0 | ||
|
6fdc8937b5 | ||
|
fb52066d8f | ||
|
b7f520cba4 | ||
|
31bf38f663 | ||
|
71d8131bb0 | ||
|
8b99b5f45b | ||
|
e8854ff88d | ||
|
1eb35cf8ef | ||
|
696633629a | ||
|
d9b68843a3 | ||
|
93005527e0 | ||
|
1c5f16762f | ||
|
af97d4654f | ||
|
b25c9bd390 | ||
|
1a0d00e39f | ||
|
af3c47c784 | ||
|
e7bbd1cadf | ||
|
1531d7e790 | ||
|
6d77b7a2d1 | ||
|
2dd83da480 | ||
|
e00e5faf6e | ||
|
5e07b63084 | ||
|
95582ee034 | ||
|
94584050ee | ||
|
e0977b4ac1 | ||
|
4176a8a661 | ||
|
73fd85f68c | ||
|
7e80107bbe | ||
|
f714f110a1 | ||
|
690d008643 | ||
|
731a805a9e | ||
|
b90624d7dd | ||
|
2a75cf9b4e | ||
|
d52cc16c88 | ||
|
112b572dc2 | ||
|
0c0a85f3bb | ||
|
2761535e12 | ||
|
1a72589f6f | ||
|
df0b5d66d7 | ||
|
c5f1cffca5 | ||
|
7eed20e35f | ||
|
e71cab485d | ||
|
568fe7f717 | ||
|
d78c6d5a62 | ||
|
c774b610d4 | ||
|
1e00f8f1cc | ||
|
28eed4fd12 | ||
|
c9ac9d9dd6 | ||
|
a4927a8915 | ||
|
029bdd849d | ||
|
8d21ae79c0 | ||
|
4278763cdb | ||
|
a6165b3a8c | ||
|
0eacd26615 | ||
|
e926bb301c | ||
|
961c455d59 | ||
|
4f601087e1 | ||
|
8b18e444a3 | ||
|
62c52e749e | ||
|
463ff1a7e4 | ||
|
3787944141 | ||
|
584a8e4e2a | ||
|
cdf41938b0 | ||
|
3d02037e3a | ||
|
6e2b4af336 | ||
|
9843f8a9c3 |
@@ -1,13 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
echo "### Running gofmt..."
|
||||
files="$(gofmt -l .)"
|
||||
|
||||
if [ ! -z "$files" ]; then
|
||||
# run gofmt to print out the diff of what needs to be changed
|
||||
gofmt -d -e .
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "### Running staticcheck..."
|
||||
staticcheck ./...
|
6
.gitignore
vendored
6
.gitignore
vendored
@@ -1 +1,5 @@
|
||||
/postmarketos-mkinitfs
|
||||
/*.1
|
||||
/*.tar.gz
|
||||
/*.sha512
|
||||
/mkinitfs
|
||||
/vendor
|
||||
|
161
.gitlab-ci.yml
161
.gitlab-ci.yml
@@ -3,43 +3,144 @@
|
||||
# global settings
|
||||
image: alpine:edge
|
||||
|
||||
variables:
|
||||
GOFLAGS: "-buildvcs=false"
|
||||
PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/mkinitfs-vendor-${CI_COMMIT_TAG}/${CI_COMMIT_TAG}"
|
||||
CI_TRON_TEMPLATE_PROJECT: &ci-tron-template-project postmarketOS/ci-common
|
||||
CI_TRON_JOB_TEMPLATE_PROJECT_URL: $CI_SERVER_URL/$CI_TRON_TEMPLATE_PROJECT
|
||||
CI_TRON_JOB_TEMPLATE_COMMIT: &ci-tron-template-commit 7c95b5f2d53533e8722abf57c73e558168e811f3
|
||||
|
||||
include:
|
||||
- project: *ci-tron-template-project
|
||||
ref: *ci-tron-template-commit
|
||||
file: '/ci-tron/common.yml'
|
||||
|
||||
stages:
|
||||
- lint
|
||||
- build
|
||||
- hardware tests
|
||||
- vendor
|
||||
- release
|
||||
|
||||
# defaults for "only"
|
||||
# We need to run the CI jobs in a "merge request specific context", if CI is
|
||||
# running in a merge request. Otherwise the environment variable that holds the
|
||||
# merge request ID is not available. This means, we must set the "only"
|
||||
# variable accordingly - and if we only do it for one job, all other jobs will
|
||||
# not get executed. So have the defaults here, and use them in all jobs that
|
||||
# should run on both the master branch, and in merge requests.
|
||||
# https://docs.gitlab.com/ee/ci/merge_request_pipelines/index.html#excluding-certain-jobs
|
||||
.only-default: &only-default
|
||||
only:
|
||||
- master
|
||||
- merge_requests
|
||||
- tags
|
||||
|
||||
# device documentation
|
||||
gofmt linting:
|
||||
stage: lint
|
||||
allow_failure: true
|
||||
<<: *only-default
|
||||
before_script:
|
||||
# specific mirror used because staticcheck hasn't made it to the other mirrors yet...
|
||||
- apk -q update --repository http://dl-4.alpinelinux.org/alpine/edge/testing
|
||||
- apk -q add --repository http://dl-4.alpinelinux.org/alpine/edge/testing go staticcheck
|
||||
script:
|
||||
- .ci/check_linting.sh
|
||||
workflow:
|
||||
rules:
|
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
- if: $CI_COMMIT_BRANCH == 'master'
|
||||
- if: '$CI_COMMIT_TAG != null'
|
||||
|
||||
build:
|
||||
stage: build
|
||||
<<: *only-default
|
||||
variables:
|
||||
GOTEST: "gotestsum --junitfile report.xml --format testname -- ./..."
|
||||
parallel:
|
||||
matrix:
|
||||
- TAG: shared
|
||||
- TAG: arm64
|
||||
tags:
|
||||
- $TAG
|
||||
before_script:
|
||||
- apk -q add go
|
||||
- apk -q add go gotestsum staticcheck make scdoc
|
||||
script:
|
||||
- go build -v
|
||||
- go test ./...
|
||||
- make test
|
||||
- make
|
||||
after_script:
|
||||
- mkdir -p rootfs/usr/sbin
|
||||
- cp mkinitfs rootfs/usr/sbin
|
||||
artifacts:
|
||||
expire_in: 1 week
|
||||
reports:
|
||||
junit: report.xml
|
||||
paths:
|
||||
- rootfs
|
||||
|
||||
.qemu-common:
|
||||
variables:
|
||||
DEVICE_NAME: qemu-$CPU_ARCH
|
||||
KERNEL_VARIANT: lts
|
||||
rules:
|
||||
- if: '$CI_COMMIT_TAG != null'
|
||||
when: never
|
||||
|
||||
.build-ci-tron-qemu:
|
||||
stage: hardware tests
|
||||
extends:
|
||||
- .pmos-ci-tron-build-boot-artifacts
|
||||
- .qemu-common
|
||||
variables:
|
||||
INSTALL_PACKAGES: device-${DEVICE_NAME} device-${DEVICE_NAME}-kernel-${KERNEL_VARIANT} postmarketos-mkinitfs-hook-ci
|
||||
|
||||
build-ci-tron-qemu-amd64:
|
||||
extends:
|
||||
- .build-ci-tron-qemu
|
||||
needs:
|
||||
- job: "build"
|
||||
parallel:
|
||||
matrix:
|
||||
- TAG: shared
|
||||
variables:
|
||||
CPU_ARCH: amd64
|
||||
|
||||
build-ci-tron-qemu-aarch64:
|
||||
extends:
|
||||
- .build-ci-tron-qemu
|
||||
needs:
|
||||
- job: "build"
|
||||
parallel:
|
||||
matrix:
|
||||
- TAG: arm64
|
||||
variables:
|
||||
CPU_ARCH: aarch64
|
||||
|
||||
.test-ci-tron-qemu:
|
||||
stage: hardware tests
|
||||
extends:
|
||||
- .pmos-ci-tron-initramfs-test
|
||||
- .qemu-common
|
||||
dependencies: []
|
||||
variables:
|
||||
CI_TRON_KERNEL__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/vmlinuz-${KERNEL_VARIANT}"
|
||||
CI_TRON_INITRAMFS__INITRAMFS__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/initramfs"
|
||||
CI_TRON_KERNEL_CMDLINE__DEVICEINFO: 'console=tty1 console=ttyS0,115200 PMOS_FORCE_PARTITION_RESIZE'
|
||||
|
||||
test-ci-tron-qemu-amd64:
|
||||
extends:
|
||||
- .test-ci-tron-qemu
|
||||
- .pmos-ci-tron-runner-qemu-amd64
|
||||
needs:
|
||||
- job: 'build-ci-tron-qemu-amd64'
|
||||
artifacts: false
|
||||
variables:
|
||||
CPU_ARCH: amd64
|
||||
|
||||
test-ci-tron-qemu-aarch64:
|
||||
extends:
|
||||
- .test-ci-tron-qemu
|
||||
- .pmos-ci-tron-runner-qemu-aarch64
|
||||
needs:
|
||||
- job: 'build-ci-tron-qemu-aarch64'
|
||||
artifacts: false
|
||||
variables:
|
||||
CPU_ARCH: aarch64
|
||||
|
||||
vendor:
|
||||
stage: vendor
|
||||
image: alpine:latest
|
||||
rules:
|
||||
- if: '$CI_COMMIT_TAG != null'
|
||||
before_script:
|
||||
- apk -q add curl go make
|
||||
script:
|
||||
- |
|
||||
make VERSION="${CI_COMMIT_TAG}" vendor
|
||||
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file "mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz" "${PACKAGE_REGISTRY_URL}/"
|
||||
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file "mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512" "${PACKAGE_REGISTRY_URL}/"
|
||||
|
||||
release:
|
||||
stage: release
|
||||
image: registry.gitlab.com/gitlab-org/release-cli:latest
|
||||
rules:
|
||||
- if: '$CI_COMMIT_TAG != null'
|
||||
script:
|
||||
- |
|
||||
release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
|
||||
--assets-link "{\"name\":\"mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz\",\"url\":\"${PACKAGE_REGISTRY_URL}/mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz\"}" \
|
||||
--assets-link "{\"name\":\"mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512\",\"url\":\"${PACKAGE_REGISTRY_URL}/mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512\"}"
|
||||
|
80
Makefile
Normal file
80
Makefile
Normal file
@@ -0,0 +1,80 @@
|
||||
.POSIX:
|
||||
.SUFFIXES: .1 .1.scd
|
||||
|
||||
VERSION?=$(shell git describe --tags --dirty 2>/dev/null || echo 0.0.0)
|
||||
VPATH=doc
|
||||
VENDORED="mkinitfs-vendor-$(VERSION)"
|
||||
PREFIX?=/usr/local
|
||||
BINDIR?=$(PREFIX)/sbin
|
||||
MANDIR?=$(PREFIX)/share/man
|
||||
SHAREDIR?=$(PREFIX)/share
|
||||
GO?=go
|
||||
GOFLAGS?=
|
||||
LDFLAGS+=-s -w -X main.Version=$(VERSION)
|
||||
RM?=rm -f
|
||||
GOTESTOPTS?=-count=1 -race
|
||||
GOTEST?=go test ./...
|
||||
DISABLE_GOGC?=
|
||||
|
||||
ifeq ($(DISABLE_GOGC),1)
|
||||
LDFLAGS+=-X main.DisableGC=true
|
||||
endif
|
||||
|
||||
GOSRC!=find * -name '*.go'
|
||||
GOSRC+=go.mod go.sum
|
||||
|
||||
DOCS := \
|
||||
mkinitfs.1
|
||||
|
||||
all: mkinitfs $(DOCS)
|
||||
|
||||
mkinitfs: $(GOSRC)
|
||||
$(GO) build $(GOFLAGS) -ldflags "$(LDFLAGS)" -o mkinitfs ./cmd/mkinitfs
|
||||
|
||||
.1.scd.1:
|
||||
scdoc < $< > $@
|
||||
|
||||
doc: $(DOCS)
|
||||
|
||||
.PHONY: fmt
|
||||
fmt:
|
||||
gofmt -w .
|
||||
|
||||
test:
|
||||
@if [ `gofmt -l . | wc -l` -ne 0 ]; then \
|
||||
gofmt -d .; \
|
||||
echo "ERROR: source files need reformatting with gofmt"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@staticcheck ./...
|
||||
|
||||
$(GOTEST) $(GOTESTOPTS)
|
||||
|
||||
clean:
|
||||
$(RM) mkinitfs $(DOCS)
|
||||
$(RM) $(VENDORED)*
|
||||
|
||||
install: $(DOCS) mkinitfs
|
||||
install -Dm755 mkinitfs -t $(DESTDIR)$(BINDIR)/
|
||||
install -Dm644 mkinitfs.1 -t $(DESTDIR)$(MANDIR)/man1/
|
||||
|
||||
.PHONY: checkinstall
|
||||
checkinstall:
|
||||
test -e $(DESTDIR)$(BINDIR)/mkinitfs
|
||||
test -e $(DESTDIR)$(MANDIR)/man1/mkinitfs.1
|
||||
|
||||
RMDIR_IF_EMPTY:=sh -c '! [ -d $$0 ] || ls -1qA $$0 | grep -q . || rmdir $$0'
|
||||
|
||||
vendor:
|
||||
go mod vendor
|
||||
tar czf $(VENDORED).tar.gz vendor/
|
||||
sha512sum $(VENDORED).tar.gz > $(VENDORED).tar.gz.sha512
|
||||
$(RM) -rf vendor
|
||||
|
||||
uninstall:
|
||||
$(RM) $(DESTDIR)$(BINDIR)/mkinitfs
|
||||
${RMDIR_IF_EMPTY} $(DESTDIR)$(BINDIR)
|
||||
$(RM) $(DESTDIR)$(MANDIR)/man1/mkinitfs.1
|
||||
$(RMDIR_IF_EMPTY) $(DESTDIR)$(MANDIR)/man1
|
||||
|
||||
.PHONY: all clean install uninstall test vendor
|
48
README.md
Normal file
48
README.md
Normal file
@@ -0,0 +1,48 @@
|
||||
`mkinitfs` is a tool for generating an initramfs. It was originally designed
|
||||
for postmarketOS, but a long term design goal is to be as distro-agnostic as
|
||||
possible. It's capable of generating a split initramfs, in the style used by
|
||||
postmarketOS, and supports running `boot-deploy` to install/finalize boot files
|
||||
on a device.
|
||||
|
||||
## Building
|
||||
|
||||
Building this project requires a Go compiler/toolchain and `make`:
|
||||
|
||||
```
|
||||
$ make
|
||||
```
|
||||
|
||||
To install locally:
|
||||
|
||||
```
|
||||
$ make install
|
||||
```
|
||||
|
||||
Installation prefix can be set in the generally accepted way with setting
|
||||
`PREFIX`:
|
||||
|
||||
```
|
||||
$ make PREFIX=/some/location
|
||||
# make PREFIX=/some/location install
|
||||
```
|
||||
|
||||
Other paths can be modified from the command line as well, see the top section of
|
||||
the `Makefile` for more information.
|
||||
|
||||
Tests (functional and linting) can be executed by using the `test` make target:
|
||||
|
||||
```
|
||||
$ make test
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
The tool can be run with no options:
|
||||
|
||||
```
|
||||
# mkinitfs
|
||||
```
|
||||
|
||||
Configuration is done through a series of flat text files that list directories
|
||||
and files, and by placing scripts in specific directories. See `man 1 mkinitfs`
|
||||
for more information.
|
197
cmd/mkinitfs/main.go
Normal file
197
cmd/mkinitfs/main.go
Normal file
@@ -0,0 +1,197 @@
|
||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/bootdeploy"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookdirs"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookfiles"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookscripts"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/initramfs"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/modules"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
|
||||
)
|
||||
|
||||
// set at build time
|
||||
var Version string
|
||||
var DisableGC string
|
||||
|
||||
func main() {
|
||||
// To allow working around silly GC-related issues, like https://gitlab.com/qemu-project/qemu/-/issues/2560
|
||||
if strings.ToLower(DisableGC) == "true" {
|
||||
debug.SetGCPercent(-1)
|
||||
}
|
||||
|
||||
retCode := 0
|
||||
defer func() { os.Exit(retCode) }()
|
||||
|
||||
outDir := flag.String("d", "/boot", "Directory to output initfs(-extra) and other boot files")
|
||||
|
||||
var showVersion bool
|
||||
flag.BoolVar(&showVersion, "version", false, "Print version and quit.")
|
||||
|
||||
var disableBootDeploy bool
|
||||
flag.BoolVar(&disableBootDeploy, "no-bootdeploy", false, "Disable running 'boot-deploy' after generating archives.")
|
||||
flag.Parse()
|
||||
|
||||
if showVersion {
|
||||
fmt.Printf("%s - %s\n", filepath.Base(os.Args[0]), Version)
|
||||
return
|
||||
}
|
||||
|
||||
log.Default().SetFlags(log.Lmicroseconds)
|
||||
|
||||
var devinfo deviceinfo.DeviceInfo
|
||||
deverr_usr := devinfo.ReadDeviceinfo("/usr/share/deviceinfo/deviceinfo")
|
||||
deverr_etc := devinfo.ReadDeviceinfo("/etc/deviceinfo")
|
||||
if deverr_etc != nil && deverr_usr != nil {
|
||||
log.Println("Error reading deviceinfo")
|
||||
log.Println("\t/usr/share/deviceinfo/deviceinfo:", deverr_usr)
|
||||
log.Println("\t/etc/deviceinfo:", deverr_etc)
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
|
||||
defer misc.TimeFunc(time.Now(), "mkinitfs")
|
||||
|
||||
kernVer, err := osutil.GetKernelVersion()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
|
||||
// temporary working dir
|
||||
workDir, err := os.MkdirTemp("", "mkinitfs")
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
log.Println("unable to create temporary work directory")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
e := os.RemoveAll(workDir)
|
||||
if e != nil && err == nil {
|
||||
log.Println(e)
|
||||
log.Println("unable to remove temporary work directory")
|
||||
}
|
||||
}()
|
||||
|
||||
log.Print("Generating for kernel version: ", kernVer)
|
||||
log.Print("Output directory: ", *outDir)
|
||||
|
||||
//
|
||||
// initramfs
|
||||
//
|
||||
// deviceinfo.InitfsCompression needs a little more post-processing
|
||||
compressionFormat, compressionLevel := archive.ExtractFormatLevel(devinfo.InitfsCompression)
|
||||
log.Printf("== Generating %s ==\n", "initramfs")
|
||||
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
|
||||
|
||||
start := time.Now()
|
||||
initramfsAr := archive.New(compressionFormat, compressionLevel)
|
||||
initfs := initramfs.New([]filelist.FileLister{
|
||||
hookdirs.New("/usr/share/mkinitfs/dirs"),
|
||||
hookdirs.New("/etc/mkinitfs/dirs"),
|
||||
hookfiles.New("/usr/share/mkinitfs/files"),
|
||||
hookfiles.New("/etc/mkinitfs/files"),
|
||||
hookscripts.New("/usr/share/mkinitfs/hooks", "/hooks"),
|
||||
hookscripts.New("/etc/mkinitfs/hooks", "/hooks"),
|
||||
hookscripts.New("/usr/share/mkinitfs/hooks-cleanup", "/hooks-cleanup"),
|
||||
hookscripts.New("/etc/mkinitfs/hooks-cleanup", "/hooks-cleanup"),
|
||||
//modules.New("/usr/share/mkinitfs/modules"),
|
||||
modules.New("/etc/mkinitfs/modules"),
|
||||
})
|
||||
initfsExtra := initramfs.New([]filelist.FileLister{
|
||||
hookfiles.New("/usr/share/mkinitfs/files-extra"),
|
||||
hookfiles.New("/etc/mkinitfs/files-extra"),
|
||||
hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
|
||||
hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
|
||||
modules.New("/usr/share/mkinitfs/modules-extra"),
|
||||
modules.New("/etc/mkinitfs/modules-extra"),
|
||||
})
|
||||
|
||||
if err := initramfsAr.AddItems(initfs); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("failed to generate: ", "initramfs")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
|
||||
// Include initramfs-extra files in the initramfs if not making a separate
|
||||
// archive
|
||||
if !devinfo.CreateInitfsExtra {
|
||||
if err := initramfsAr.AddItems(initfsExtra); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("failed to generate: ", "initramfs")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := initramfsAr.Write(filepath.Join(workDir, "initramfs"), os.FileMode(0644)); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("failed to generate: ", "initramfs")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
misc.TimeFunc(start, "initramfs")
|
||||
|
||||
if devinfo.CreateInitfsExtra {
|
||||
//
|
||||
// initramfs-extra
|
||||
//
|
||||
// deviceinfo.InitfsExtraCompression needs a little more post-processing
|
||||
compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
|
||||
log.Printf("== Generating %s ==\n", "initramfs-extra")
|
||||
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
|
||||
|
||||
start = time.Now()
|
||||
initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
|
||||
if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("failed to generate: ", "initramfs-extra")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("failed to generate: ", "initramfs-extra")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
misc.TimeFunc(start, "initramfs-extra")
|
||||
}
|
||||
|
||||
// Final processing of initramfs / kernel is done by boot-deploy
|
||||
if !disableBootDeploy {
|
||||
if err := bootDeploy(workDir, *outDir, devinfo); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("boot-deploy failed")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func bootDeploy(workDir string, outDir string, devinfo deviceinfo.DeviceInfo) error {
|
||||
log.Print("== Using boot-deploy to finalize/install files ==")
|
||||
defer misc.TimeFunc(time.Now(), "boot-deploy")
|
||||
|
||||
bd := bootdeploy.New(workDir, outDir, devinfo)
|
||||
return bd.Run()
|
||||
}
|
196
doc/mkinitfs.1.scd
Normal file
196
doc/mkinitfs.1.scd
Normal file
@@ -0,0 +1,196 @@
|
||||
mkinitfs(1) "mkinitfs"
|
||||
|
||||
# NAME
|
||||
|
||||
mkinitfs
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
mkinitfs is a simple, generic tool for generating an initramfs, primarily
|
||||
developed for use in postmarketOS
|
||||
|
||||
# CONCEPTS
|
||||
|
||||
mkinitfs is designed to generate two archives, "initramfs" and
|
||||
"initramfs-extra", however it's possible to configure mkinitfs to run without
|
||||
generating an initramfs-extra archive. mkinitfs is primarily configured through
|
||||
the placement of files in specific directories detailed below in the
|
||||
*DIRECTORIES* section. *deviceinfo* files are also used to provide other
|
||||
configuration options to mkinitfs, these are covered under the *DEVICEINFO*
|
||||
section below.
|
||||
|
||||
mkinitfs does not provide an init script, or any boot-time logic, it's purpose
|
||||
is purely to generate the archive(s). mkinitfs does call *boot-deploy* after
|
||||
creating the archive(s), in order to install/deploy them and any other relevant
|
||||
boot-related items onto the system.
|
||||
|
||||
Design goals of this project are:
|
||||
|
||||
- Support as many distros as possible
|
||||
- Simplify configuration, while still giving multiple opportunities to set or override defaults
|
||||
- Execute an external app to do any boot install/setup finalization
|
||||
- One such app is here: https://gitlab.com/postmarketOS/boot-deploy
|
||||
- But implementation can be anything, see the section on *BOOT-DEPLOY*
|
||||
for more info
|
||||
|
||||
# DEVICEINFO
|
||||
|
||||
The canonical deviceinfo "specification" is at
|
||||
https://wiki.postmarketos.org/wiki/Deviceinfo_reference
|
||||
|
||||
mkinitfs reads deviceinfo values from */usr/share/deviceinfo/deviceinfo* and
|
||||
*/etc/deviceinfo*, in that order. The following variables
|
||||
are *required* by mkinitfs:
|
||||
|
||||
- deviceinfo_create_initfs_extra
|
||||
- deviceinfo_generate_systemd_boot
|
||||
- deviceinfo_initfs_compression
|
||||
- deviceinfo_initfs_extra_compression
|
||||
- deviceinfo_uboot_boardname
|
||||
|
||||
It is a design goal to keep the number of required variables from deviceinfo to
|
||||
a bare minimum, and to require only variables that don't hold lists of things.
|
||||
|
||||
*NOTE*: When deviceinfo_initfs_extra_compression is set, make sure that the
|
||||
necessary tools to extract the configured archive format are in the initramfs
|
||||
archive.
|
||||
|
||||
# ARCHIVE COMPRESSION
|
||||
|
||||
Archive compression parameters are specified in the
|
||||
*deviceinfo_initfs_compression* and *deviceinfo_initfs_extra_compression*
|
||||
deviceinfo variables. Their values do not have to match, but special
|
||||
consideration should be taken since some formats may require additional kernel
|
||||
options or tools in the initramfs to support it.
|
||||
|
||||
Supported compression *formats* for mkinitfs are:
|
||||
|
||||
- gzip
|
||||
- lz4
|
||||
- lzma
|
||||
- none
|
||||
- zstd
|
||||
|
||||
Supported compression *levels* for mkinitfs:
|
||||
|
||||
- best
|
||||
- default
|
||||
- fast
|
||||
|
||||
The value of these variables follows this syntax: *<format>:<level>*. For
|
||||
example, *zstd* with the *fast* compression level would be:
|
||||
*deviceinfo_initfs_compression="zstd:fast"*
|
||||
|
||||
Defaults to *gzip* and *default* for both archives if format and/or level is
|
||||
unsupported or omitted.
|
||||
|
||||
|
||||
# DIRECTORIES
|
||||
|
||||
The following directories are used by mkinitfs to generate the initramfs and
|
||||
initramfs-extra archives. Directories that end in *-extra* indicate directories
|
||||
that are used for constructing the initramfs-extra archive, while those without
|
||||
it are for constructing the initramfs archive.
|
||||
|
||||
Configuration under */usr/share/mkinitfs* is intended to be managed by
|
||||
distributions, while configuration under */etc/mkinitfs* is for users to
|
||||
create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, and then from */etc/mkinitfs*.
|
||||
|
||||
## /usr/share/mkinitfs/files, /etc/mkinitfs/files
|
||||
## /usr/share/mkinitfs/files-extra, /etc/mkinitfs/files-extra
|
||||
|
||||
Files with the *.files* extension are read as a list of
|
||||
files/directories. Each line is in the format:
|
||||
|
||||
```
|
||||
<source path>:<destination path>
|
||||
```
|
||||
|
||||
The source path is the location, at runtime, of the file or directory
|
||||
which will be copied to the destination path within the initramfs
|
||||
archive. Specifying a destination path, with *:<destination path>* is
|
||||
optional. If it is omitted, then the source path will be used as the
|
||||
destination path within the archive. The source and destination paths
|
||||
are delimited by a *:* (colon.) Destination path is ignored if the source
|
||||
path is a glob that returns more than 1 file. This may change in the future.
|
||||
|
||||
[[ *Line in .files*
|
||||
:< Comment
|
||||
| */usr/share/bazz*
|
||||
: File or directory */usr/share/bazz* would be added to the archive under */usr/share/bazz*
|
||||
| */usr/share/bazz:/bazz*
|
||||
: File or directory */usr/share/bazz* would be added to the archive under */bazz*
|
||||
| */root/something/\**
|
||||
: Everything under */root/something* would be added to the archive under */root/something*
|
||||
| */etc/foo/\*/bazz:/foo*
|
||||
: Anything that matches the glob will be installed under the source path in the archive. For example, */etc/foo/bar/bazz* would be installed at */etc/foo/bar/bazz* in the archive. The destination path is ignored.
|
||||
|
||||
It's possible to overwrite file/directory destinations from
|
||||
configuration in */usr/share/mkinitfs* by specifying the same source
|
||||
path(s) under the relevant directory in */etc/mkinitfs*, and changing
|
||||
the destination path.
|
||||
|
||||
Any lines in these files that start with *#* are considered comments, and
|
||||
skipped.
|
||||
|
||||
## /usr/share/mkinitfs/hooks, /etc/mkinitfs/hooks
|
||||
## /usr/share/mkinitfs/hooks-cleanup, /etc/mkinitfs/hooks-cleanup
|
||||
## /usr/share/mkinitfs/hooks-extra, /etc/mkinitfs/hooks-extra
|
||||
|
||||
Any files listed under these directories are copied as-is into the
|
||||
relevant archives. Hooks are generally script files, but how they are
|
||||
treated in the initramfs is entirely up to whatever init script is run
|
||||
there on boot.
|
||||
|
||||
Hooks are installed in the initramfs under the */hooks* directory, and
|
||||
under */hooks-extra* for the initramfs-extra.
|
||||
|
||||
## /usr/share/mkinitfs/modules, /etc/mkinitfs/modules
|
||||
## /usr/share/mkinitfs/modules-extra, /etc/mkinitfs/modules-extra
|
||||
|
||||
Files with the *.modules* extension in these directories are lists of
|
||||
kernel modules to include in the initramfs. Individual modules and
|
||||
directories can be listed in the files here. Globbing is also supported.
|
||||
|
||||
Modules are installed in the initramfs archive under the same path they
|
||||
exist on the system where mkinitfs is executed.
|
||||
|
||||
Any lines in these files that start with *#* are considered comments, and
|
||||
skipped.
|
||||
|
||||
## /usr/share/mkinitfs/dirs, /etc/mkinitfs/dirs
|
||||
|
||||
Files with the *.dirs* extension in these directories are lists of
|
||||
directories to create within the initramfs. There is no *-extra* variant,
|
||||
since directories are of negligible size.
|
||||
|
||||
Any lines in these files that start with *#* are considered comments, and
|
||||
skipped.
|
||||
|
||||
# BOOT-DEPLOY
|
||||
|
||||
After generating archives, mkinitfs will execute *boot-deploy*, using *$PATH* to
|
||||
search for the app. The following commandline options are passed to it:
|
||||
|
||||
*-i* <initramfs filename>
|
||||
|
||||
Currently this is hardcoded to be "initramfs"
|
||||
|
||||
*-k* <kernel filename>
|
||||
|
||||
*-d* <work directory>
|
||||
|
||||
Path to the directory containing the build artifacts from mkinitfs.
|
||||
|
||||
*-o* <destination directory>
|
||||
|
||||
Path to the directory that boot-deploy should use as its root when
|
||||
installing files.
|
||||
|
||||
*initramfs-extra*
|
||||
|
||||
This string is the filename of the initramfs-extra archive.
|
||||
|
||||
# AUTHORS
|
||||
|
||||
*Clayton Craft* <clayton@craftyguy.net>
|
17
go.mod
17
go.mod
@@ -1,10 +1,19 @@
|
||||
module gitlab.com/postmarketOS/postmarketos-mkinitfs
|
||||
|
||||
go 1.16
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
|
||||
github.com/klauspost/compress v1.13.3 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
|
||||
github.com/klauspost/compress v1.15.12
|
||||
github.com/pierrec/lz4/v4 v4.1.17
|
||||
github.com/ulikunitz/xz v0.5.10
|
||||
golang.org/x/sys v0.18.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/mvdan/sh v2.6.4+incompatible // indirect
|
||||
golang.org/x/crypto v0.21.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/term v0.18.0 // indirect
|
||||
mvdan.cc/sh v2.6.4+incompatible // indirect
|
||||
)
|
||||
|
23
go.sum
23
go.sum
@@ -1,9 +1,22 @@
|
||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=
|
||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/klauspost/compress v1.13.3 h1:BtAvtV1+h0YwSVwWoYXMREPpYu9VzTJ9QDI1TEg/iQQ=
|
||||
github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
|
||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
|
||||
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/mvdan/sh v2.6.4+incompatible h1:D4oEWW0J8cL7zeQkrXw76IAYXF0mJfDaBwjgzmKb6zs=
|
||||
github.com/mvdan/sh v2.6.4+incompatible/go.mod h1:kipHzrJQZEDCMTNRVRAlMMFjqHEYrthfIlFkJSrmDZE=
|
||||
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
|
||||
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
|
||||
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
mvdan.cc/sh v2.6.4+incompatible h1:eD6tDeh0pw+/TOTI1BBEryZ02rD2nMcFsgcvde7jffM=
|
||||
mvdan.cc/sh v2.6.4+incompatible/go.mod h1:IeeQbZq+x2SUGBensq/jge5lLQbS3XT2ktyp3wrt4x8=
|
||||
|
484
internal/archive/archive.go
Normal file
484
internal/archive/archive.go
Normal file
@@ -0,0 +1,484 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/cavaliercoder/go-cpio"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/pierrec/lz4/v4"
|
||||
"github.com/ulikunitz/xz"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
)
|
||||
|
||||
// CompressFormat names a compression format supported for the initramfs
// archive. See the Format* constants for valid values.
type CompressFormat string

const (
	FormatGzip CompressFormat = "gzip"
	FormatLzma CompressFormat = "lzma"
	FormatLz4  CompressFormat = "lz4"
	FormatZstd CompressFormat = "zstd"
	FormatNone CompressFormat = "none"
)

// CompressLevel is a coarse compression level that is translated to each
// format's native level in writeCompressed.
type CompressLevel string

const (
	// Mapped to the "default" level for the given format
	LevelDefault CompressLevel = "default"
	// Maps to the fastest compression level for the given format
	LevelFast CompressLevel = "fast"
	// Maps to the best compression level for the given format
	LevelBest CompressLevel = "best"
)
|
||||
|
||||
// Archive accumulates items for a cpio archive in memory and writes the
// compressed result out on demand.
type Archive struct {
	cpioWriter      *cpio.Writer    // writes cpio records into buf
	buf             *bytes.Buffer   // in-memory cpio archive, compressed on Write
	compress_format CompressFormat
	compress_level  CompressLevel
	items           archiveItems    // sorted, deduplicated set of entries
}

// New returns an empty Archive that will be compressed with the given
// format and level when written.
func New(format CompressFormat, level CompressLevel) *Archive {
	buf := new(bytes.Buffer)
	archive := &Archive{
		cpioWriter:      cpio.NewWriter(buf),
		buf:             buf,
		compress_format: format,
		compress_level:  level,
	}

	return archive
}
|
||||
|
||||
// archiveItem pairs a cpio header with the filesystem path its contents are
// read from when the archive is written.
type archiveItem struct {
	header     *cpio.Header
	sourcePath string
}

// archiveItems is a list of archiveItem kept sorted (ascending by header
// name); the embedded RWMutex guards concurrent add/iterate.
type archiveItems struct {
	items []archiveItem
	sync.RWMutex
}
|
||||
|
||||
// ExtractFormatLevel parses the given string in the format format[:level],
|
||||
// where :level is one of CompressLevel consts. If level is omitted from the
|
||||
// string, or if it can't be parsed, the level is set to the default level for
|
||||
// the given format. If format is unknown, gzip is selected. This function is
|
||||
// designed to always return something usable within this package.
|
||||
func ExtractFormatLevel(s string) (format CompressFormat, level CompressLevel) {
|
||||
|
||||
f, l, found := strings.Cut(s, ":")
|
||||
if !found {
|
||||
l = "default"
|
||||
}
|
||||
|
||||
level = CompressLevel(strings.ToLower(l))
|
||||
format = CompressFormat(strings.ToLower(f))
|
||||
switch level {
|
||||
|
||||
}
|
||||
switch level {
|
||||
case LevelBest:
|
||||
case LevelDefault:
|
||||
case LevelFast:
|
||||
default:
|
||||
log.Print("Unknown or no compression level set, using default")
|
||||
level = LevelDefault
|
||||
}
|
||||
|
||||
switch format {
|
||||
case FormatGzip:
|
||||
case FormatLzma:
|
||||
log.Println("Format lzma doesn't support a compression level, using default settings")
|
||||
level = LevelDefault
|
||||
case FormatLz4:
|
||||
case FormatNone:
|
||||
case FormatZstd:
|
||||
default:
|
||||
log.Print("Unknown or no compression format set, using gzip")
|
||||
format = FormatGzip
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Adds the given item to the archiveItems, only if it doesn't already exist in
|
||||
// the list. The items are kept sorted in ascending order.
|
||||
func (a *archiveItems) add(item archiveItem) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
|
||||
if len(a.items) < 1 {
|
||||
// empty list
|
||||
a.items = append(a.items, item)
|
||||
return
|
||||
}
|
||||
|
||||
// find existing item, or index of where new item should go
|
||||
i := sort.Search(len(a.items), func(i int) bool {
|
||||
return strings.Compare(item.header.Name, a.items[i].header.Name) <= 0
|
||||
})
|
||||
|
||||
if i >= len(a.items) {
|
||||
// doesn't exist in list, but would be at the very end
|
||||
a.items = append(a.items, item)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.Compare(a.items[i].header.Name, item.header.Name) == 0 {
|
||||
// already in list
|
||||
return
|
||||
}
|
||||
|
||||
// grow list by 1, shift right at index, and insert new string at index
|
||||
a.items = append(a.items, archiveItem{})
|
||||
copy(a.items[i+1:], a.items[i:])
|
||||
a.items[i] = item
|
||||
}
|
||||
|
||||
// iterate through items and send each one over the returned channel
|
||||
func (a *archiveItems) IterItems() <-chan archiveItem {
|
||||
ch := make(chan archiveItem)
|
||||
go func() {
|
||||
a.RLock()
|
||||
defer a.RUnlock()
|
||||
|
||||
for _, item := range a.items {
|
||||
ch <- item
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// Write finalizes the in-memory cpio archive, compresses it with the
// configured format/level, writes it to path, and sets its mode.
func (archive *Archive) Write(path string, mode os.FileMode) error {
	if err := archive.writeCpio(); err != nil {
		return err
	}

	if err := archive.cpioWriter.Close(); err != nil {
		return fmt.Errorf("archive.Write: error closing archive: %w", err)
	}

	// Write archive to path
	if err := archive.writeCompressed(path, mode); err != nil {
		return fmt.Errorf("unable to write archive to location %q: %w", path, err)
	}

	// NOTE(review): writeCompressed already chmods path to mode, so this
	// second chmod looks redundant — confirm before removing.
	if err := os.Chmod(path, mode); err != nil {
		return fmt.Errorf("unable to chmod %q to %s: %w", path, mode, err)
	}

	return nil
}
|
||||
|
||||
// AddItems adds every file produced by the given FileLister to the archive.
// Internally this just calls AddItem on each source/dest pair, stopping at
// the first error. (The old comment about a "map format" described a
// previous signature.)
func (archive *Archive) AddItems(flister filelist.FileLister) error {
	list, err := flister.List()
	if err != nil {
		return err
	}
	for i := range list.IterItems() {
		if err := archive.AddItem(i.Source, i.Dest); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// AddItemsExclude is like AddItems, but takes a second FileLister that lists
|
||||
// items that should not be added to the archive from the first FileLister
|
||||
func (archive *Archive) AddItemsExclude(flister filelist.FileLister, exclude filelist.FileLister) error {
|
||||
list, err := flister.List()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
excludeList, err := exclude.List()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range list.IterItems() {
|
||||
dest, found := excludeList.Get(i.Source)
|
||||
|
||||
if found {
|
||||
if i.Dest != dest {
|
||||
found = false
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
if err := archive.AddItem(i.Source, i.Dest); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Adds the given file or directory at "source" to the archive at "dest"
|
||||
func (archive *Archive) AddItem(source string, dest string) error {
|
||||
if osutil.HasMergedUsr() {
|
||||
source = osutil.MergeUsr(source)
|
||||
dest = osutil.MergeUsr(dest)
|
||||
}
|
||||
sourceStat, err := os.Lstat(source)
|
||||
if err != nil {
|
||||
e, ok := err.(*os.PathError)
|
||||
if e.Err == syscall.ENOENT && ok {
|
||||
// doesn't exist in current filesystem, assume it's a new directory
|
||||
return archive.addDir(dest)
|
||||
}
|
||||
return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
|
||||
}
|
||||
|
||||
// A symlink to a directory doesn't have the os.ModeDir bit set, so we need
|
||||
// to check if it's a symlink first
|
||||
if sourceStat.Mode()&os.ModeSymlink != 0 {
|
||||
return archive.addSymlink(source, dest)
|
||||
}
|
||||
|
||||
if sourceStat.Mode()&os.ModeDir != 0 {
|
||||
return archive.addDir(dest)
|
||||
}
|
||||
|
||||
return archive.addFile(source, dest)
|
||||
}
|
||||
|
||||
func (archive *Archive) addSymlink(source string, dest string) error {
|
||||
target, err := os.Readlink(source)
|
||||
if err != nil {
|
||||
log.Print("addSymlink: failed to get symlink target for: ", source)
|
||||
return err
|
||||
}
|
||||
|
||||
// Make sure we pick up the symlink target too
|
||||
targetAbs := target
|
||||
if filepath.Dir(target) == "." {
|
||||
// relative symlink, make it absolute so we can add the target to the archive
|
||||
targetAbs = filepath.Join(filepath.Dir(source), target)
|
||||
}
|
||||
|
||||
if !filepath.IsAbs(targetAbs) {
|
||||
targetAbs, err = osutil.RelativeSymlinkTargetToDir(targetAbs, filepath.Dir(source))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
archive.AddItem(targetAbs, targetAbs)
|
||||
|
||||
// Now add the symlink itself
|
||||
destFilename := strings.TrimPrefix(dest, "/")
|
||||
|
||||
archive.items.add(archiveItem{
|
||||
sourcePath: source,
|
||||
header: &cpio.Header{
|
||||
Name: destFilename,
|
||||
Linkname: target,
|
||||
Mode: 0644 | cpio.ModeSymlink,
|
||||
Size: int64(len(target)),
|
||||
},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) addFile(source string, dest string) error {
|
||||
if err := archive.addDir(filepath.Dir(dest)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sourceStat, err := os.Lstat(source)
|
||||
if err != nil {
|
||||
log.Print("addFile: failed to stat file: ", source)
|
||||
return err
|
||||
}
|
||||
|
||||
destFilename := strings.TrimPrefix(dest, "/")
|
||||
|
||||
archive.items.add(archiveItem{
|
||||
sourcePath: source,
|
||||
header: &cpio.Header{
|
||||
Name: destFilename,
|
||||
Mode: cpio.FileMode(sourceStat.Mode().Perm()),
|
||||
Size: sourceStat.Size(),
|
||||
// Checksum: 1,
|
||||
},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err error) {
|
||||
|
||||
var compressor io.WriteCloser
|
||||
defer func() {
|
||||
e := compressor.Close()
|
||||
if e != nil && err == nil {
|
||||
err = e
|
||||
}
|
||||
}()
|
||||
|
||||
fd, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Note: fd.Close omitted since it'll be closed in "compressor"
|
||||
|
||||
switch archive.compress_format {
|
||||
case FormatGzip:
|
||||
level := gzip.DefaultCompression
|
||||
switch archive.compress_level {
|
||||
case LevelBest:
|
||||
level = gzip.BestCompression
|
||||
case LevelFast:
|
||||
level = gzip.BestSpeed
|
||||
}
|
||||
compressor, err = gzip.NewWriterLevel(fd, level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case FormatLzma:
|
||||
compressor, err = xz.NewWriter(fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case FormatLz4:
|
||||
// The default compression for the lz4 library is Fast, and
|
||||
// they don't define a Default level otherwise
|
||||
level := lz4.Fast
|
||||
switch archive.compress_level {
|
||||
case LevelBest:
|
||||
level = lz4.Level9
|
||||
case LevelFast:
|
||||
level = lz4.Fast
|
||||
}
|
||||
|
||||
var writer = lz4.NewWriter(fd)
|
||||
err = writer.Apply(lz4.LegacyOption(true), lz4.CompressionLevelOption(level))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
compressor = writer
|
||||
case FormatNone:
|
||||
compressor = fd
|
||||
case FormatZstd:
|
||||
level := zstd.SpeedDefault
|
||||
switch archive.compress_level {
|
||||
case LevelBest:
|
||||
level = zstd.SpeedBestCompression
|
||||
case LevelFast:
|
||||
level = zstd.SpeedFastest
|
||||
}
|
||||
compressor, err = zstd.NewWriter(fd, zstd.WithEncoderLevel(level))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
log.Print("Unknown or no compression format set, using gzip")
|
||||
compressor = gzip.NewWriter(fd)
|
||||
}
|
||||
|
||||
if _, err = io.Copy(compressor, archive.buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// call fsync just to be sure
|
||||
if err := fd.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) writeCpio() error {
|
||||
// Just in case
|
||||
if osutil.HasMergedUsr() {
|
||||
archive.addSymlink("/bin", "/bin")
|
||||
archive.addSymlink("/sbin", "/sbin")
|
||||
archive.addSymlink("/lib", "/lib")
|
||||
}
|
||||
// having a transient function for actually adding files to the archive
|
||||
// allows the deferred fd.close to run after every copy and prevent having
|
||||
// tons of open file handles until the copying is all done
|
||||
copyToArchive := func(source string, header *cpio.Header) error {
|
||||
|
||||
if err := archive.cpioWriter.WriteHeader(header); err != nil {
|
||||
return fmt.Errorf("archive.writeCpio: unable to write header: %w", err)
|
||||
}
|
||||
|
||||
// don't copy actual dirs into the archive, writing the header is enough
|
||||
if !header.Mode.IsDir() {
|
||||
if header.Mode.IsRegular() {
|
||||
fd, err := os.Open(source)
|
||||
if err != nil {
|
||||
return fmt.Errorf("archive.writeCpio: Unable to open file %q, %w", source, err)
|
||||
}
|
||||
defer fd.Close()
|
||||
if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
|
||||
return fmt.Errorf("archive.writeCpio: Couldn't process %q: %w", source, err)
|
||||
}
|
||||
} else if header.Linkname != "" {
|
||||
// the contents of a symlink is just need the link name
|
||||
if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
|
||||
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %q -> %q: %w", source, header.Linkname, err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("archive.writeCpio: unknown type for file: %q: %d", source, header.Mode)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range archive.items.IterItems() {
|
||||
if err := copyToArchive(i.sourcePath, i.header); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) addDir(dir string) error {
|
||||
if dir == "/" {
|
||||
dir = "."
|
||||
}
|
||||
|
||||
subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
|
||||
for i, subdir := range subdirs {
|
||||
path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
|
||||
archive.items.add(archiveItem{
|
||||
sourcePath: path,
|
||||
header: &cpio.Header{
|
||||
Name: path,
|
||||
Mode: cpio.ModeDir | 0755,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
278
internal/archive/archive_test.go
Normal file
278
internal/archive/archive_test.go
Normal file
@@ -0,0 +1,278 @@
|
||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/cavaliercoder/go-cpio"
|
||||
)
|
||||
|
||||
// TestArchiveItemsAdd exercises archiveItems.add: insertion into an empty
// list, deduplication of an existing name, and insertion at the beginning,
// middle, and end while keeping the list sorted by header name.
func TestArchiveItemsAdd(t *testing.T) {
	subtests := []struct {
		name     string
		inItems  []archiveItem
		inItem   archiveItem
		expected []archiveItem
	}{
		{
			name:    "empty list",
			inItems: []archiveItem{},
			inItem: archiveItem{
				sourcePath: "/foo/bar",
				header:     &cpio.Header{Name: "/foo/bar"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			name: "already exists",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/foo",
				header:     &cpio.Header{Name: "/foo"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			name: "add new",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
				{
					sourcePath: "/foo/bar1",
					header:     &cpio.Header{Name: "/foo/bar1"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/foo/bar0",
				header:     &cpio.Header{Name: "/foo/bar0"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
				{
					sourcePath: "/foo/bar0",
					header:     &cpio.Header{Name: "/foo/bar0"},
				},
				{
					sourcePath: "/foo/bar1",
					header:     &cpio.Header{Name: "/foo/bar1"},
				},
			},
		},
		{
			name: "add new at beginning",
			inItems: []archiveItem{
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/bazz/bar",
				header:     &cpio.Header{Name: "/bazz/bar"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			name: "add new at end",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/zzz/bazz",
				header:     &cpio.Header{Name: "/zzz/bazz"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/zzz/bazz",
					header:     &cpio.Header{Name: "/zzz/bazz"},
				},
			},
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			a := archiveItems{items: st.inItems}
			a.add(st.inItem)
			if !reflect.DeepEqual(st.expected, a.items) {
				t.Fatal("expected:", st.expected, " got: ", a.items)
			}
		})
	}
}
|
||||
|
||||
// TestExtractFormatLevel checks the format[:level] parser's fallbacks:
// unknown formats become gzip, unknown or missing levels become the default
// level, and lzma always forces the default level.
func TestExtractFormatLevel(t *testing.T) {
	tests := []struct {
		name           string
		in             string
		expectedFormat CompressFormat
		expectedLevel  CompressLevel
	}{
		{
			name:           "gzip, default level",
			in:             "gzip:default",
			expectedFormat: FormatGzip,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "unknown format, level 12",
			in:             "pear:12",
			expectedFormat: FormatGzip,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "zstd, level not given",
			in:             "zstd",
			expectedFormat: FormatZstd,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "zstd, invalid level 'fast:'",
			in:             "zstd:fast:",
			expectedFormat: FormatZstd,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "zstd, best",
			in:             "zstd:best",
			expectedFormat: FormatZstd,
			expectedLevel:  LevelBest,
		},
		{
			name:           "zstd, level empty :",
			in:             "zstd:",
			expectedFormat: FormatZstd,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "gzip, best",
			in:             "gzip:best",
			expectedFormat: FormatGzip,
			expectedLevel:  LevelBest,
		},
		{
			name:           "<empty>, <empty>",
			in:             "",
			expectedFormat: FormatGzip,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "lzma, fast",
			in:             "lzma:fast",
			expectedFormat: FormatLzma,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "lz4, fast",
			in:             "lz4:fast",
			expectedFormat: FormatLz4,
			expectedLevel:  LevelFast,
		},
		{
			name:           "none",
			in:             "none",
			expectedFormat: FormatNone,
			expectedLevel:  LevelDefault,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			format, level := ExtractFormatLevel(test.in)
			if format != test.expectedFormat {
				t.Fatal("format expected: ", test.expectedFormat, " got: ", format)
			}
			if level != test.expectedLevel {
				t.Fatal("level expected: ", test.expectedLevel, " got: ", level)
			}

		})
	}
}
|
171
internal/bootdeploy/bootdeploy.go
Normal file
171
internal/bootdeploy/bootdeploy.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package bootdeploy
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
|
||||
)
|
||||
|
||||
// BootDeploy wraps an invocation of the external "boot-deploy" tool.
type BootDeploy struct {
	inDir   string // staging directory passed as boot-deploy -d
	outDir  string // output directory passed as boot-deploy -o
	devinfo deviceinfo.DeviceInfo
}

// New returns a new BootDeploy, which then runs:
//
//	boot-deploy -d indir -o outDir
//
// devinfo is used to access some deviceinfo values, such as UbootBoardname
// and GenerateSystemdBoot
func New(inDir string, outDir string, devinfo deviceinfo.DeviceInfo) *BootDeploy {
	return &BootDeploy{
		inDir:   inDir,
		outDir:  outDir,
		devinfo: devinfo,
	}
}
|
||||
|
||||
func (b *BootDeploy) Run() error {
|
||||
if err := copyUbootFiles(b.inDir, b.devinfo.UbootBoardname); errors.Is(err, os.ErrNotExist) {
|
||||
log.Println("u-boot files copying skipped: ", err)
|
||||
} else {
|
||||
if err != nil {
|
||||
log.Fatal("copyUbootFiles: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
kernels, err := getKernelPath(b.outDir, b.devinfo.GenerateSystemdBoot == "true")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Pick a kernel that does not have suffixes added by boot-deploy
|
||||
var kernFile string
|
||||
for _, f := range kernels {
|
||||
if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
|
||||
continue
|
||||
}
|
||||
kernFile = f
|
||||
break
|
||||
}
|
||||
|
||||
kernFd, err := os.Open(kernFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer kernFd.Close()
|
||||
|
||||
kernFilename := path.Base(kernFile)
|
||||
kernFileCopy, err := os.Create(filepath.Join(b.inDir, kernFilename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := kernFileCopy.Close(); err != nil {
|
||||
return fmt.Errorf("error closing %s: %w", kernFilename, err)
|
||||
}
|
||||
|
||||
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
|
||||
args := []string{
|
||||
"-i", "initramfs",
|
||||
"-k", kernFilename,
|
||||
"-d", b.inDir,
|
||||
"-o", b.outDir,
|
||||
}
|
||||
|
||||
if b.devinfo.CreateInitfsExtra {
|
||||
args = append(args, "initramfs-extra")
|
||||
}
|
||||
cmd := exec.Command("boot-deploy", args...)
|
||||
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getKernelPath(outDir string, zboot bool) ([]string, error) {
|
||||
var kernels []string
|
||||
if zboot {
|
||||
kernels, _ = filepath.Glob(filepath.Join(outDir, "linux.efi"))
|
||||
if len(kernels) > 0 {
|
||||
return kernels, nil
|
||||
}
|
||||
// else fallback to vmlinuz* below
|
||||
}
|
||||
|
||||
kernFile := "vmlinuz*"
|
||||
kernels, _ = filepath.Glob(filepath.Join(outDir, kernFile))
|
||||
if len(kernels) == 0 {
|
||||
return nil, errors.New("Unable to find any kernels at " + filepath.Join(outDir, kernFile))
|
||||
}
|
||||
|
||||
return kernels, nil
|
||||
}
|
||||
|
||||
// Copy copies the file at srcFile path to a new file at dstFile path
|
||||
func copy(srcFile, dstFile string) error {
|
||||
out, err := os.Create(dstFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
errClose := out.Close()
|
||||
if err == nil {
|
||||
err = errClose
|
||||
}
|
||||
}()
|
||||
|
||||
in, err := os.Open(srcFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyUbootFiles uses deviceinfo_uboot_boardname to copy u-boot files required
|
||||
// for running boot-deploy
|
||||
func copyUbootFiles(path, ubootBoardname string) error {
|
||||
if ubootBoardname == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
srcDir := filepath.Join("/usr/share/u-boot", ubootBoardname)
|
||||
entries, err := os.ReadDir(srcDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
sourcePath := filepath.Join(srcDir, entry.Name())
|
||||
destPath := filepath.Join(path, entry.Name())
|
||||
|
||||
if err := copy(sourcePath, destPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
65
internal/filelist/filelist.go
Normal file
65
internal/filelist/filelist.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package filelist
|
||||
|
||||
import "sync"
|
||||
|
||||
// FileLister is implemented by anything that can produce a FileList.
type FileLister interface {
	List() (*FileList, error)
}

// File is a single source -> destination path mapping.
type File struct {
	Source string
	Dest   string
}

// FileList is a concurrency-safe mapping of source paths to destination
// paths.
type FileList struct {
	m map[string]string
	sync.RWMutex
}

// NewFileList returns an empty, ready-to-use FileList.
func NewFileList() *FileList {
	return &FileList{m: map[string]string{}}
}

// Add records dest as the destination for src, replacing any previous entry.
func (f *FileList) Add(src string, dest string) {
	f.Lock()
	f.m[src] = dest
	f.Unlock()
}

// Get returns the destination for src and whether src is present.
func (f *FileList) Get(src string) (string, bool) {
	f.RLock()
	defer f.RUnlock()

	dest, ok := f.m[src]
	return dest, ok
}

// Import copies in the contents of src. If a source path already exists when
// importing, then the destination path is updated with the new value.
func (f *FileList) Import(src *FileList) {
	for file := range src.IterItems() {
		f.Add(file.Source, file.Dest)
	}
}

// IterItems streams every entry as a File over the returned channel; the
// list is read-locked until iteration completes, so callers must drain it.
func (f *FileList) IterItems() <-chan File {
	out := make(chan File)
	go func() {
		f.RLock()

		for s, d := range f.m {
			out <- File{Source: s, Dest: d}
		}

		close(out)
		f.RUnlock()
	}()
	return out
}
|
56
internal/filelist/hookdirs/hookdirs.go
Normal file
56
internal/filelist/hookdirs/hookdirs.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package hookdirs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
)
|
||||
|
||||
type HookDirs struct {
|
||||
path string
|
||||
}
|
||||
|
||||
// New returns a new HookDirs that will use the given path to provide a list
|
||||
// of directories use.
|
||||
func New(path string) *HookDirs {
|
||||
return &HookDirs{
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *HookDirs) List() (*filelist.FileList, error) {
|
||||
log.Printf("- Searching for directories specified in %s", h.path)
|
||||
|
||||
files := filelist.NewFileList()
|
||||
fileInfo, err := os.ReadDir(h.path)
|
||||
if err != nil {
|
||||
log.Println("-- Unable to find dir, skipping...")
|
||||
return files, nil
|
||||
}
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(h.path, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getHookDirs: unable to open hook file: %w", err)
|
||||
|
||||
}
|
||||
defer f.Close()
|
||||
log.Printf("-- Creating directories from: %s\n", path)
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
dir := strings.TrimSpace(s.Text())
|
||||
if len(dir) == 0 || strings.HasPrefix(dir, "#") {
|
||||
continue
|
||||
}
|
||||
|
||||
files.Add(dir, dir)
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
|
92
internal/filelist/hookfiles/hookfiles.go
Normal file
92
internal/filelist/hookfiles/hookfiles.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package hookfiles
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
)
|
||||
|
||||
type HookFiles struct {
|
||||
filePath string
|
||||
}
|
||||
|
||||
// New returns a new HookFiles that will use the given path to provide a list
|
||||
// of files + any binary dependencies they might have.
|
||||
func New(filePath string) *HookFiles {
|
||||
return &HookFiles{
|
||||
filePath: filePath,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *HookFiles) List() (*filelist.FileList, error) {
|
||||
log.Printf("- Searching for file lists from %s", h.filePath)
|
||||
|
||||
files := filelist.NewFileList()
|
||||
fileInfo, err := os.ReadDir(h.filePath)
|
||||
if err != nil {
|
||||
log.Println("-- Unable to find dir, skipping...")
|
||||
return files, nil
|
||||
}
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(h.filePath, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getHookFiles: unable to open hook file: %w", err)
|
||||
|
||||
}
|
||||
defer f.Close()
|
||||
log.Printf("-- Including files from: %s\n", path)
|
||||
|
||||
if list, err := slurpFiles(f); err != nil {
|
||||
return nil, fmt.Errorf("hookfiles: unable to process hook file %q: %w", path, err)
|
||||
} else {
|
||||
files.Import(list)
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// slurpFiles parses hook-file lines from fd into a FileList. Each line is
// "src" or "src:dest"; blank lines and "#" comments are skipped. src is
// expanded through misc.GetFiles (which may glob and add dependencies —
// NOTE(review): exact expansion semantics live in misc, not visible here).
// An explicit dest is only honored when src expanded to exactly one file.
func slurpFiles(fd io.Reader) (*filelist.FileList, error) {
	files := filelist.NewFileList()

	s := bufio.NewScanner(fd)
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}

		// split "src:dest" at the first colon; has_dest is false for plain "src"
		src, dest, has_dest := strings.Cut(line, ":")
		if osutil.HasMergedUsr() {
			src = osutil.MergeUsr(src)
		}

		fFiles, err := misc.GetFiles([]string{src}, true)
		if err != nil {
			return nil, fmt.Errorf("unable to add %q: %w", src, err)
		}
		// loop over all returned files from GetFile
		for _, file := range fFiles {
			if !has_dest {
				files.Add(file, file)
			} else if len(fFiles) > 1 {
				// Don't support specifying dest if src was a glob
				// NOTE: this could support this later...
				files.Add(file, file)
			} else {
				// dest path specified, and only 1 file
				files.Add(file, dest)
			}
		}
	}

	// surface any scanner read error alongside the (possibly partial) list
	return files, s.Err()
}
|
42
internal/filelist/hookscripts/hookscripts.go
Normal file
42
internal/filelist/hookscripts/hookscripts.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package hookscripts
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
)
|
||||
|
||||
// HookScripts lists hook scripts found in a directory, mapping each script to
// a destination directory in the initramfs while keeping its file name.
type HookScripts struct {
	// destPath is the directory the scripts are installed to.
	destPath string
	// scriptsDir is the directory searched for scripts.
	scriptsDir string
}
|
||||
|
||||
// New returns a new HookScripts that will use the given path to provide a list
// of script files. The destination for each script is set to destPath, using
// the original file name.
func New(scriptsDir string, destPath string) *HookScripts {
	return &HookScripts{
		destPath:   destPath,
		scriptsDir: scriptsDir,
	}
}
|
||||
|
||||
func (h *HookScripts) List() (*filelist.FileList, error) {
|
||||
log.Printf("- Searching for hook scripts from %s", h.scriptsDir)
|
||||
|
||||
files := filelist.NewFileList()
|
||||
|
||||
fileInfo, err := os.ReadDir(h.scriptsDir)
|
||||
if err != nil {
|
||||
log.Println("-- Unable to find dir, skipping...")
|
||||
return files, nil
|
||||
}
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(h.scriptsDir, file.Name())
|
||||
log.Printf("-- Including script: %s\n", path)
|
||||
files.Add(path, filepath.Join(h.destPath, file.Name()))
|
||||
}
|
||||
return files, nil
|
||||
}
|
38
internal/filelist/initramfs/initramfs.go
Normal file
38
internal/filelist/initramfs/initramfs.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package initramfs
|
||||
|
||||
import (
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
)
|
||||
|
||||
// Initramfs allows building arbitrarily complex lists of features, by slurping
// up types that implement FileLister (which includes this type! yippee) and
// combining the output from them.
type Initramfs struct {
	// features are the FileListers whose outputs are combined.
	features []filelist.FileLister
	// files caches the combined list after the first successful List call.
	files *filelist.FileList
}
|
||||
|
||||
// New returns a new Initramfs that generate a list of files based on the given
|
||||
// list of FileListers.
|
||||
func New(features []filelist.FileLister) *Initramfs {
|
||||
return &Initramfs{
|
||||
features: features,
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Initramfs) List() (*filelist.FileList, error) {
|
||||
if i.files != nil {
|
||||
return i.files, nil
|
||||
}
|
||||
i.files = filelist.NewFileList()
|
||||
|
||||
for _, f := range i.features {
|
||||
list, err := f.List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i.files.Import(list)
|
||||
}
|
||||
|
||||
return i.files, nil
|
||||
}
|
218
internal/filelist/modules/modules.go
Normal file
218
internal/filelist/modules/modules.go
Normal file
@@ -0,0 +1,218 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
)
|
||||
|
||||
// Modules produces the list of kernel module files to include in the
// initramfs, driven by module list files found in a directory.
type Modules struct {
	// modulesListPath is the directory scanned for module list files.
	modulesListPath string
}
|
||||
|
||||
// New returns a new Modules that will read in lists of kernel modules in the given path.
|
||||
func New(modulesListPath string) *Modules {
|
||||
return &Modules{
|
||||
modulesListPath: modulesListPath,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Modules) List() (*filelist.FileList, error) {
|
||||
kernVer, err := osutil.GetKernelVersion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
files := filelist.NewFileList()
|
||||
libDir := "/usr/lib/modules"
|
||||
if exists, err := misc.Exists(libDir); !exists {
|
||||
libDir = "/lib/modules"
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", libDir, err)
|
||||
}
|
||||
|
||||
modDir := filepath.Join(libDir, kernVer)
|
||||
if exists, err := misc.Exists(modDir); !exists {
|
||||
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
|
||||
log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
|
||||
return files, nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", modDir, err)
|
||||
}
|
||||
|
||||
// modules.* required by modprobe
|
||||
modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
|
||||
for _, file := range modprobeFiles {
|
||||
files.Add(file, file)
|
||||
}
|
||||
|
||||
// slurp up modules from lists in modulesListPath
|
||||
log.Printf("- Searching for kernel modules from %s", m.modulesListPath)
|
||||
fileInfo, err := os.ReadDir(m.modulesListPath)
|
||||
if err != nil {
|
||||
return files, nil
|
||||
}
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(m.modulesListPath, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to open module list file %q: %w", path, err)
|
||||
}
|
||||
defer f.Close()
|
||||
log.Printf("-- Including modules from: %s\n", path)
|
||||
|
||||
if list, err := slurpModules(f, modDir); err != nil {
|
||||
return nil, fmt.Errorf("unable to process module list file %q: %w", path, err)
|
||||
} else {
|
||||
files.Import(list)
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
|
||||
files := filelist.NewFileList()
|
||||
s := bufio.NewScanner(fd)
|
||||
for s.Scan() {
|
||||
line := strings.TrimSpace(s.Text())
|
||||
if len(line) == 0 || strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
dir, file := filepath.Split(line)
|
||||
if file == "" {
|
||||
// item is a directory
|
||||
dir = filepath.Join(modDir, dir)
|
||||
dirs, _ := filepath.Glob(dir)
|
||||
for _, d := range dirs {
|
||||
if modFilelist, err := getModulesInDir(d); err != nil {
|
||||
return nil, fmt.Errorf("unable to get modules dir %q: %w", d, err)
|
||||
} else {
|
||||
for _, file := range modFilelist {
|
||||
files.Add(file, file)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if dir == "" {
|
||||
// item is a module name
|
||||
if modFilelist, err := getModule(line, modDir); err != nil {
|
||||
return nil, fmt.Errorf("unable to get module file %q: %w", line, err)
|
||||
} else {
|
||||
for _, file := range modFilelist {
|
||||
files.Add(file, file)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Printf("Unknown module entry: %q", line)
|
||||
}
|
||||
}
|
||||
|
||||
return files, s.Err()
|
||||
}
|
||||
|
||||
func getModulesInDir(modPath string) (files []string, err error) {
|
||||
err = filepath.Walk(modPath, func(path string, _ os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
// Unable to walk path
|
||||
return err
|
||||
}
|
||||
// this assumes module names are in the format <name>.ko[.format],
|
||||
// where ".format" (e.g. ".gz") is optional.
|
||||
if !strings.Contains(".ko", path) {
|
||||
return nil
|
||||
}
|
||||
files = append(files, path)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
|
||||
// file and all of its dependencies.
|
||||
// Note: it's not necessarily fatal if the module is not found, since it may
|
||||
// have been built into the kernel
|
||||
func getModule(modName string, modDir string) (files []string, err error) {
|
||||
|
||||
modDep := filepath.Join(modDir, "modules.dep")
|
||||
if exists, err := misc.Exists(modDep); !exists {
|
||||
return nil, fmt.Errorf("kernel module.dep not found: %s", modDir)
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("received unexpected error when getting module.dep status: %w", err)
|
||||
}
|
||||
|
||||
fd, err := os.Open(modDep)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to open modules.dep: %w", err)
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
deps, err := getModuleDeps(modName, fd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, dep := range deps {
|
||||
p := filepath.Join(modDir, dep)
|
||||
if exists, err := misc.Exists(p); !exists {
|
||||
return nil, fmt.Errorf("tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p)
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", p, err)
|
||||
}
|
||||
|
||||
files = append(files, p)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Get the canonicalized name for the module as represented in the given modules.dep io.reader
|
||||
func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
|
||||
var deps []string
|
||||
|
||||
// split the module name on - and/or _, build a regex for matching
|
||||
splitRe := regexp.MustCompile("[-_]+")
|
||||
modNameReStr := splitRe.ReplaceAllString(modName, "[-_]+")
|
||||
re := regexp.MustCompile("^" + modNameReStr + "$")
|
||||
|
||||
s := bufio.NewScanner(modulesDep)
|
||||
for s.Scan() {
|
||||
line := strings.TrimSpace(s.Text())
|
||||
if len(line) == 0 || strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) == 0 {
|
||||
continue
|
||||
}
|
||||
fields[0] = strings.TrimSuffix(fields[0], ":")
|
||||
|
||||
found := re.FindAll([]byte(filepath.Base(stripExts(fields[0]))), -1)
|
||||
if len(found) > 0 {
|
||||
deps = append(deps, fields...)
|
||||
break
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
log.Print("Unable to get module + dependencies: ", modName)
|
||||
return deps, err
|
||||
}
|
||||
|
||||
return deps, nil
|
||||
}
|
||||
|
||||
// stripExts returns file with everything after the first "." removed, e.g.
// "virtio_blk.ko.gz" -> "virtio_blk".
func stripExts(file string) string {
	base, _, _ := strings.Cut(file, ".")
	return base
}
|
@@ -1,7 +1,7 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// Copyright 2023 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package main
|
||||
package modules
|
||||
|
||||
import (
|
||||
"strings"
|
||||
@@ -18,6 +18,7 @@ func TestStripExts(t *testing.T) {
|
||||
{"another_file", "another_file"},
|
||||
{"a.b.c.d.e.f.g.h.i", "a"},
|
||||
{"virtio_blk.ko", "virtio_blk"},
|
||||
{"virtio_blk.ko ", "virtio_blk"},
|
||||
}
|
||||
for _, table := range tables {
|
||||
out := stripExts(table.in)
|
||||
@@ -27,18 +28,6 @@ func TestStripExts(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func stringSlicesEqual(a []string, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, v := range a {
|
||||
if v != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var testModuleDep string = `
|
||||
kernel/sound/soc/codecs/snd-soc-msm8916-digital.ko:
|
||||
kernel/net/sched/act_ipt.ko.xz: kernel/net/netfilter/x_tables.ko.xz
|
||||
@@ -80,3 +69,15 @@ func TestGetModuleDeps(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// stringSlicesEqual reports whether a and b have the same length and the same
// elements, compared position by position.
func stringSlicesEqual(a []string, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
|
197
internal/misc/getfiles.go
Normal file
197
internal/misc/getfiles.go
Normal file
@@ -0,0 +1,197 @@
|
||||
package misc
|
||||
|
||||
import (
|
||||
"debug/elf"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
)
|
||||
|
||||
func GetFiles(list []string, required bool) (files []string, err error) {
|
||||
for _, file := range list {
|
||||
filelist, err := getFile(file, required)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, filelist...)
|
||||
}
|
||||
|
||||
files = RemoveDuplicates(files)
|
||||
return
|
||||
}
|
||||
|
||||
// getFile resolves a single path spec into the list of files it implies.
// Handles, in order: glob expansion, symlinks (both link and target are
// included), a ".zst" compressed fallback, directory recursion, and ELF
// shared-library dependencies. When required is false, a missing file is
// silently skipped.
func getFile(file string, required bool) (files []string, err error) {
	// Expand glob expression
	expanded, err := filepath.Glob(file)
	if err != nil {
		return
	}
	if len(expanded) > 0 && expanded[0] != file {
		// file was a real glob pattern: resolve each match independently
		for _, path := range expanded {
			if globFiles, err := getFile(path, required); err != nil {
				return files, err
			} else {
				files = append(files, globFiles...)
			}
		}
		return RemoveDuplicates(files), nil
	}

	// Symlinks need special handling to prevent an infinite recursion loop:
	// 1) add the symlink itself to the list of files
	// 2) set file to the dereferenced target
	// 3) continue this function to either walk the target if it is a dir or
	//    add the target to the list of files
	if s, err := os.Lstat(file); err == nil {
		if s.Mode()&fs.ModeSymlink != 0 {
			files = append(files, file)
			if target, err := filepath.EvalSymlinks(file); err != nil {
				return files, err
			} else {
				file = target
			}
		}
	}

	fileInfo, err := os.Stat(file)
	if err != nil {
		// Check if there is a Zstd-compressed version of the file
		fileZstd := file + ".zst" // .zst is the extension used by linux-firmware
		fileInfoZstd, errZstd := os.Stat(fileZstd)

		if errZstd == nil {
			file = fileZstd
			fileInfo = fileInfoZstd
			// Reset err so we don't retain the error from the os.Stat call
			// for the uncompressed version.
			err = nil
		} else {
			if required {
				return files, fmt.Errorf("getFile: failed to stat file %q: %w (also tried %q: %w)", file, err, fileZstd, errZstd)
			}

			// not required: a missing file is silently skipped
			return files, nil
		}
	}

	if fileInfo.IsDir() {
		// Recurse over directory contents
		err := filepath.Walk(file, func(path string, f os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if f.IsDir() {
				return nil
			}
			newFiles, err := getFile(path, required)
			if err != nil {
				return err
			}
			files = append(files, newFiles...)
			return nil
		})
		if err != nil {
			return files, err
		}
	} else {
		files = append(files, file)

		// get dependencies for binaries (elf.Open succeeding is the ELF test)
		if _, err := elf.Open(file); err == nil {
			if binaryDepFiles, err := getBinaryDeps(file); err != nil {
				return files, err
			} else {
				files = append(files, binaryDepFiles...)
			}
		}
	}

	files = RemoveDuplicates(files)
	return
}
|
||||
|
||||
// getDeps recursively resolves the shared-library dependencies of the ELF
// binary at file, returning file itself plus every dependency found. parents
// tracks binaries already visited on this resolution so that dependency
// cycles terminate.
func getDeps(file string, parents map[string]struct{}) (files []string, err error) {

	// already visited: cycle or shared dependency, nothing more to add
	if _, found := parents[file]; found {
		return
	}

	// get dependencies for binaries
	fd, err := elf.Open(file)
	if err != nil {
		return nil, fmt.Errorf("getDeps: unable to open elf binary %q: %w", file, err)
	}
	// error intentionally ignored: a binary whose import table can't be read
	// is treated as having no dependencies (libs stays empty)
	libs, _ := fd.ImportedLibraries()
	fd.Close()
	files = append(files, file)
	parents[file] = struct{}{}

	if len(libs) == 0 {
		return
	}

	// we don't recursively search these paths for performance reasons
	libdirGlobs := []string{
		"/usr/lib",
		"/lib",
		"/usr/lib/expect*",
		"/usr/lib/systemd",
	}

	for _, lib := range libs {
		found := false
	findDepLoop:
		for _, libdirGlob := range libdirGlobs {
			libdirs, _ := filepath.Glob(libdirGlob)
			for _, libdir := range libdirs {
				path := filepath.Join(libdir, lib)
				if _, err := os.Stat(path); err == nil {
					// found the library: pull in its own deps first,
					// then the library itself
					binaryDepFiles, err := getDeps(path, parents)
					if err != nil {
						return nil, err
					}
					files = append(files, binaryDepFiles...)
					files = append(files, path)
					found = true
					break findDepLoop
				}
			}
		}
		if !found {
			return nil, fmt.Errorf("getDeps: unable to locate dependency for %q: %s", file, lib)
		}
	}

	return
}
|
||||
|
||||
// Recursively list all dependencies for a given ELF binary.
// If file is a symlink, dependencies are resolved for its target instead.
func getBinaryDeps(file string) ([]string, error) {
	// if file is a symlink, resolve dependencies for target
	fileStat, err := os.Lstat(file)
	if err != nil {
		return nil, fmt.Errorf("getBinaryDeps: failed to stat file %q: %w", file, err)
	}

	// Symlink: dereference it (resolving a relative target against the
	// symlink's own directory) and continue with the target.
	// NOTE(review): old comment claimed this "writes the symlink to the
	// archive" — it does not; it only follows the link.
	if fileStat.Mode()&os.ModeSymlink != 0 {
		target, err := os.Readlink(file)
		if err != nil {
			return nil, fmt.Errorf("getBinaryDeps: unable to read symlink %q: %w", file, err)
		}
		if !filepath.IsAbs(target) {
			target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
			if err != nil {
				return nil, err
			}
		}
		file = target
	}

	return getDeps(file, make(map[string]struct{}))

}
|
167
internal/misc/getfiles_test.go
Normal file
167
internal/misc/getfiles_test.go
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2025 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package misc
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestGetFile exercises getFile's path-resolution cases: symlinked
// directories (guarding against infinite recursion with a timeout), symlinked
// files, plain files, directory recursion, and the ".zst" compressed
// fallback.
func TestGetFile(t *testing.T) {
	subtests := []struct {
		name string
		// setup builds the fixture in tmpDir and returns the path to pass to
		// getFile plus the exact file set getFile is expected to return.
		setup    func(tmpDir string) (inputPath string, expectedFiles []string, err error)
		required bool
	}{
		{
			name: "symlink to directory - no infinite recursion",
			setup: func(tmpDir string) (string, []string, error) {
				// Create target directory with files
				targetDir := filepath.Join(tmpDir, "target")
				if err := os.MkdirAll(targetDir, 0755); err != nil {
					return "", nil, err
				}

				testFile1 := filepath.Join(targetDir, "file1.txt")
				testFile2 := filepath.Join(targetDir, "file2.txt")
				if err := os.WriteFile(testFile1, []byte("content1"), 0644); err != nil {
					return "", nil, err
				}
				if err := os.WriteFile(testFile2, []byte("content2"), 0644); err != nil {
					return "", nil, err
				}

				// Create symlink pointing to target directory
				symlinkPath := filepath.Join(tmpDir, "symlink")
				if err := os.Symlink(targetDir, symlinkPath); err != nil {
					return "", nil, err
				}

				expected := []string{symlinkPath, testFile1, testFile2}
				return symlinkPath, expected, nil
			},
			required: true,
		},
		{
			name: "symlink to file - returns both symlink and target",
			setup: func(tmpDir string) (string, []string, error) {
				// Create target file
				targetFile := filepath.Join(tmpDir, "target.txt")
				if err := os.WriteFile(targetFile, []byte("content"), 0644); err != nil {
					return "", nil, err
				}

				// Create symlink pointing to target file
				symlinkPath := filepath.Join(tmpDir, "symlink.txt")
				if err := os.Symlink(targetFile, symlinkPath); err != nil {
					return "", nil, err
				}

				expected := []string{symlinkPath, targetFile}
				return symlinkPath, expected, nil
			},
			required: true,
		},
		{
			name: "regular file",
			setup: func(tmpDir string) (string, []string, error) {
				regularFile := filepath.Join(tmpDir, "regular.txt")
				if err := os.WriteFile(regularFile, []byte("content"), 0644); err != nil {
					return "", nil, err
				}

				expected := []string{regularFile}
				return regularFile, expected, nil
			},
			required: true,
		},
		{
			name: "regular directory",
			setup: func(tmpDir string) (string, []string, error) {
				// Create directory with files (including a nested subdir)
				dirPath := filepath.Join(tmpDir, "testdir")
				if err := os.MkdirAll(dirPath, 0755); err != nil {
					return "", nil, err
				}

				file1 := filepath.Join(dirPath, "file1.txt")
				file2 := filepath.Join(dirPath, "subdir", "file2.txt")

				if err := os.WriteFile(file1, []byte("content1"), 0644); err != nil {
					return "", nil, err
				}
				if err := os.MkdirAll(filepath.Dir(file2), 0755); err != nil {
					return "", nil, err
				}
				if err := os.WriteFile(file2, []byte("content2"), 0644); err != nil {
					return "", nil, err
				}

				expected := []string{file1, file2}
				return dirPath, expected, nil
			},
			required: true,
		},
		{
			name: "zst compressed file fallback",
			setup: func(tmpDir string) (string, []string, error) {
				// Create a .zst file but NOT the original file
				zstFile := filepath.Join(tmpDir, "firmware.bin.zst")
				if err := os.WriteFile(zstFile, []byte("compressed content"), 0644); err != nil {
					return "", nil, err
				}

				// Request the original file (without .zst extension)
				originalFile := filepath.Join(tmpDir, "firmware.bin")

				// Expected: should find and return the .zst version
				expected := []string{zstFile}
				return originalFile, expected, nil
			},
			required: true,
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			tmpDir := t.TempDir()

			inputPath, expectedFiles, err := st.setup(tmpDir)
			if err != nil {
				t.Fatalf("setup failed: %v", err)
			}

			// Add timeout protection for infinite recursion test
			done := make(chan struct{})
			var files []string
			var getFileErr error

			go func() {
				defer close(done)
				files, getFileErr = getFile(inputPath, st.required)
			}()

			select {
			case <-done:
				if getFileErr != nil {
					t.Fatalf("getFile failed: %v", getFileErr)
				}
			case <-time.After(5 * time.Second):
				t.Fatal("getFile appears to be in infinite recursion (timeout)")
			}

			// Sort for comparison (getFile's output order is not specified)
			sort.Strings(expectedFiles)
			sort.Strings(files)

			if !reflect.DeepEqual(expectedFiles, files) {
				t.Fatalf("expected: %q, got: %q", expectedFiles, files)
			}
		})
	}
}
|
65
internal/misc/misc.go
Normal file
65
internal/misc/misc.go
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package misc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Merge the contents of "b" into "a", overwriting any previously existing
// keys in "a". "a" is modified in place.
func Merge(a map[string]string, b map[string]string) {
	for key := range b {
		a[key] = b[key]
	}
}
|
||||
|
||||
// RemoveDuplicates removes duplicate entries from the given string slice and
// returns a slice with the unique values, preserving the order of first
// occurrence.
//
// The previous implementation collected values into a map and re-emitted
// them, so the output order changed between runs (Go map iteration order is
// random); the order is now deterministic, which callers that sort the result
// remain compatible with.
func RemoveDuplicates(in []string) (out []string) {
	// the map only tracks membership; struct{} values carry no data
	seen := make(map[string]struct{}, len(in))
	out = make([]string, 0, len(in))
	for _, s := range in {
		if _, ok := seen[s]; ok {
			continue
		}
		seen[s] = struct{}{}
		out = append(out, s)
	}

	return
}
|
||||
|
||||
// Prints the execution time of a function, not meant to be very
// sensitive/accurate, but good enough to gauge rough run times.
// Meant to be called as:
//
//	defer misc.TimeFunc(time.Now(), "foo")
func TimeFunc(start time.Time, name string) {
	log.Printf("%s completed in: %.2fs", name, time.Since(start).Seconds())
}
|
||||
|
||||
// Exists tests if the given file/dir exists or not. Returns any errors related
// to os.Stat if the type is *not* ErrNotExist. If an error is returned, then
// the value of the returned boolean cannot be trusted.
func Exists(file string) (bool, error) {
	_, err := os.Stat(file)
	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, os.ErrNotExist):
		// A missing file is not an error condition
		return false, nil
	default:
		// Any other stat failure is reported to the caller
		return false, err
	}
}
|
125
internal/misc/misc_test.go
Normal file
125
internal/misc/misc_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package misc
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestMerge verifies that Merge copies b into a in place, with b's values
// winning on duplicate keys.
func TestMerge(t *testing.T) {
	subtests := []struct {
		name     string
		inA      map[string]string
		inB      map[string]string
		expected map[string]string
	}{
		{
			name: "empty B",
			inA: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
			inB: map[string]string{},
			expected: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
		},
		{
			name: "empty A",
			inA:  map[string]string{},
			inB: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
			expected: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
		},
		{
			// "banana" appears in both; B's value must win
			name: "both populated, some duplicates",
			inA: map[string]string{
				"bar":    "bazz",
				"banana": "yellow",
				"guava":  "green",
			},
			inB: map[string]string{
				"foo":    "bar",
				"banana": "airplane",
			},
			expected: map[string]string{
				"foo":    "bar",
				"guava":  "green",
				"banana": "airplane",
				"bar":    "bazz",
			},
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			// Merge mutates its first argument in place
			out := st.inA
			Merge(out, st.inB)
			if !reflect.DeepEqual(st.expected, out) {
				t.Fatalf("expected: %q, got: %q\n", st.expected, out)
			}
		})
	}
}
|
||||
|
||||
// TestRemoveDuplicates verifies de-duplication for the no-duplicate,
// all-duplicate, and empty-input cases. Output order is not asserted: both
// sides are sorted before comparison.
func TestRemoveDuplicates(t *testing.T) {
	subtests := []struct {
		name     string
		in       []string
		expected []string
	}{
		{
			name: "no duplicates",
			in: []string{
				"foo",
				"bar",
				"banana",
				"airplane",
			},
			expected: []string{
				"foo",
				"bar",
				"banana",
				"airplane",
			},
		},
		{
			name: "all duplicates",
			in: []string{
				"foo",
				"foo",
				"foo",
				"foo",
			},
			expected: []string{
				"foo",
			},
		},
		{
			name:     "empty",
			in:       []string{},
			expected: []string{},
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			// note: sorting to make comparison easier later
			sort.Strings(st.expected)
			out := RemoveDuplicates(st.in)
			sort.Strings(out)
			if !reflect.DeepEqual(st.expected, out) {
				t.Fatalf("expected: %q, got: %q\n", st.expected, out)
			}
		})
	}
}
|
107
internal/osutil/osutil.go
Normal file
107
internal/osutil/osutil.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package osutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Try to guess whether the system has merged dirs under /usr by checking
// whether /bin and /lib are symlinks.
func HasMergedUsr() bool {
	for _, dir := range []string{"/bin", "/lib"} {
		stat, err := os.Lstat(dir)
		switch {
		case err != nil:
			// TODO: probably because the dir doesn't exist... so
			// should we assume that it's because the system has some weird
			// implementation of "merge /usr"?
			return true
		case stat.Mode()&os.ModeSymlink == 0:
			// Not a symlink, so must not be merged /usr
			return false
		}
	}
	return true
}
|
||||
|
||||
// MergeUsr converts the given path to one supported by a merged /usr config.
// E.g., /bin/foo becomes /usr/bin/foo, /lib/bar becomes /usr/lib/bar.
// Paths already under /usr, or outside the merged directories, are returned
// unchanged.
// See: https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge
func MergeUsr(file string) string {
	// Prepend /usr to supported paths. Match only on a whole path component:
	// the previous bare HasPrefix check also rewrote unrelated paths such as
	// /binary-file or /libexec/foo.
	for _, prefix := range []string{"/bin", "/sbin", "/lib", "/lib64"} {
		if file == prefix || strings.HasPrefix(file, prefix+"/") {
			file = filepath.Join("/usr", file)
			break
		}
	}

	return file
}
|
||||
|
||||
// RelativeSymlinkTargetToDir converts a relative symlink target path (e.g.
// ../../lib/foo.so) to an absolute path, resolved lexically against the
// directory the symlink lives in.
//
// The previous implementation chdir'd into dir and back in order to use
// filepath.Abs; that mutates process-global state (unsafe with concurrent
// goroutines) and fails when dir doesn't exist. A lexical join produces the
// same cleaned result for an absolute dir.
func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
	if !filepath.IsAbs(dir) {
		abs, err := filepath.Abs(dir)
		if err != nil {
			log.Print("Unable to resolve abs path to: ", dir)
			return "", err
		}
		dir = abs
	}

	// filepath.Join cleans "." and ".." components, as filepath.Abs did.
	return filepath.Join(dir, symPath), nil
}
|
||||
|
||||
func FreeSpace(path string) (uint64, error) {
|
||||
var stat unix.Statfs_t
|
||||
unix.Statfs(path, &stat)
|
||||
size := stat.Bavail * uint64(stat.Bsize)
|
||||
return size, nil
|
||||
}
|
||||
|
||||
// getKernelReleaseFile locates the kernel.release file for the installed
// kernel flavor under /usr/share/kernel. Exactly one flavor must be present.
func getKernelReleaseFile() (string, error) {
	matches, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
	// only one kernel flavor supported
	if len(matches) != 1 {
		return "", fmt.Errorf("only one kernel release/flavor is supported, found: %q", matches)
	}

	return matches[0], nil
}
|
||||
|
||||
func GetKernelVersion() (string, error) {
|
||||
var version string
|
||||
|
||||
releaseFile, err := getKernelReleaseFile()
|
||||
if err != nil {
|
||||
return version, err
|
||||
}
|
||||
|
||||
contents, err := os.ReadFile(releaseFile)
|
||||
if err != nil {
|
||||
return version, err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(contents)), nil
|
||||
}
|
49
internal/osutil/osutil_test.go
Normal file
49
internal/osutil/osutil_test.go
Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright 2024 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package osutil
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestMergeUsr verifies the /usr-merge path rewriting: /bin, /sbin, /lib and
// /lib64 paths gain a /usr prefix, while paths already under /usr are
// returned unchanged.
func TestMergeUsr(t *testing.T) {
	subtests := []struct {
		in       string
		expected string
	}{
		{
			in:       "/bin/foo",
			expected: "/usr/bin/foo",
		},
		{
			in:       "/sbin/foo",
			expected: "/usr/sbin/foo",
		},
		{
			in:       "/usr/sbin/foo",
			expected: "/usr/sbin/foo",
		},
		{
			in:       "/usr/bin/foo",
			expected: "/usr/bin/foo",
		},
		{
			in:       "/lib/foo.so",
			expected: "/usr/lib/foo.so",
		},
		{
			in:       "/lib64/foo.so",
			expected: "/usr/lib64/foo.so",
		},
	}

	for _, st := range subtests {
		t.Run(st.in, func(t *testing.T) {
			out := MergeUsr(st.in)
			if out != st.expected {
				t.Fatalf("expected: %q, got: %q\n", st.expected, out)
			}
		})
	}
}
|
775
main.go
775
main.go
@@ -1,775 +0,0 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"debug/elf"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/archive"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
|
||||
)
|
||||
|
||||
// timeFunc logs how much time has elapsed since start, tagged with
// name. Intended for use with defer: defer timeFunc(time.Now(), "x").
func timeFunc(start time.Time, name string) {
	log.Printf("%s completed in: %s", name, time.Since(start))
}
|
||||
|
||||
// main builds the initramfs and initramfs-extra archives for the
// installed kernel, copies u-boot files when applicable, and hands the
// results to boot-deploy for final installation. Any failure is fatal
// (log.Fatal), except a missing deviceinfo which causes an early,
// successful exit.
func main() {
	deviceinfoFile := "/etc/deviceinfo"
	if !exists(deviceinfoFile) {
		log.Print("NOTE: deviceinfo (from device package) not installed yet, " +
			"not building the initramfs now (it should get built later " +
			"automatically.)")
		return
	}

	devinfo, err := deviceinfo.ReadDeviceinfo(deviceinfoFile)
	if err != nil {
		log.Fatal(err)
	}

	outDir := flag.String("d", "/boot", "Directory to output initfs(-extra) and other boot files")
	flag.Parse()

	// Log total wall-clock runtime when main returns.
	defer timeFunc(time.Now(), "mkinitfs")

	kernVer, err := getKernelVersion()
	if err != nil {
		log.Fatal(err)
	}

	// temporary working dir
	workDir, err := ioutil.TempDir("", "mkinitfs")
	if err != nil {
		log.Fatal("Unable to create temporary work directory:", err)
	}
	defer os.RemoveAll(workDir)

	log.Print("Generating for kernel version: ", kernVer)
	log.Print("Output directory: ", *outDir)

	if err := generateInitfs("initramfs", workDir, kernVer, devinfo); err != nil {
		log.Fatal("generateInitfs: ", err)
	}

	if err := generateInitfsExtra("initramfs-extra", workDir, devinfo); err != nil {
		log.Fatal("generateInitfsExtra: ", err)
	}

	// Missing u-boot files are not fatal: not every device ships them.
	if err := copyUbootFiles(workDir, devinfo); errors.Is(err, os.ErrNotExist) {
		log.Println("u-boot files copying skipped: ", err)
	} else {
		if err != nil {
			log.Fatal("copyUbootFiles: ", err)
		}
	}

	// Final processing of initramfs / kernel is done by boot-deploy
	if err := bootDeploy(workDir, *outDir); err != nil {
		log.Fatal("bootDeploy: ", err)
	}

}
|
||||
|
||||
func bootDeploy(workDir string, outDir string) error {
|
||||
// boot-deploy expects the kernel to be in the same dir as initramfs.
|
||||
// Assume that the kernel is in the output dir...
|
||||
log.Print("== Using boot-deploy to finalize/install files ==")
|
||||
kernels, _ := filepath.Glob(filepath.Join(outDir, "vmlinuz*"))
|
||||
if len(kernels) == 0 {
|
||||
return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
|
||||
}
|
||||
|
||||
// Pick a kernel that does not have suffixes added by boot-deploy
|
||||
var kernFile string
|
||||
for _, f := range kernels {
|
||||
if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
|
||||
continue
|
||||
}
|
||||
kernFile = f
|
||||
break
|
||||
}
|
||||
|
||||
kernFd, err := os.Open(kernFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer kernFd.Close()
|
||||
|
||||
kernFileCopy, err := os.Create(filepath.Join(workDir, "vmlinuz"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
|
||||
return err
|
||||
}
|
||||
kernFileCopy.Close()
|
||||
|
||||
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
|
||||
cmd := exec.Command("boot-deploy",
|
||||
"-i", "initramfs",
|
||||
"-k", "vmlinuz",
|
||||
"-d", workDir,
|
||||
"-o", outDir,
|
||||
"initramfs-extra")
|
||||
if !exists(cmd.Path) {
|
||||
return errors.New("boot-deploy command not found")
|
||||
}
|
||||
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
log.Print("'boot-deploy' command failed")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// exists reports whether the given path can be stat'd. Note that any
// stat failure (including permission errors) is treated as "does not
// exist" — a deliberate best-effort behavior.
func exists(file string) bool {
	_, err := os.Stat(file)
	return err == nil
}
|
||||
|
||||
func getHookFiles(filesdir string) misc.StringSet {
|
||||
fileInfo, err := ioutil.ReadDir(filesdir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
files := make(misc.StringSet)
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(filesdir, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
if err := getFile(files, s.Text(), true); err != nil {
|
||||
log.Fatalf("Unable to find file %q required by %q", s.Text(), path)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
// Recursively list all dependencies for a given ELF binary
|
||||
func getBinaryDeps(files misc.StringSet, file string) error {
|
||||
// if file is a symlink, resolve dependencies for target
|
||||
fileStat, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
log.Print("getBinaryDeps: failed to stat file")
|
||||
return err
|
||||
}
|
||||
|
||||
// Symlink: write symlink to archive then set 'file' to link target
|
||||
if fileStat.Mode()&os.ModeSymlink != 0 {
|
||||
target, err := os.Readlink(file)
|
||||
if err != nil {
|
||||
log.Print("getBinaryDeps: unable to read symlink: ", file)
|
||||
return err
|
||||
}
|
||||
if !filepath.IsAbs(target) {
|
||||
target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := getBinaryDeps(files, target); err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// get dependencies for binaries
|
||||
fd, err := elf.Open(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
libs, _ := fd.ImportedLibraries()
|
||||
fd.Close()
|
||||
files[file] = false
|
||||
|
||||
if len(libs) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
libdirs := []string{"/usr/lib", "/lib"}
|
||||
for _, lib := range libs {
|
||||
found := false
|
||||
for _, libdir := range libdirs {
|
||||
path := filepath.Join(libdir, lib)
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
err := getBinaryDeps(files, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files[path] = false
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
log.Fatalf("Unable to locate dependency for %q: %s", file, lib)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFiles(files misc.StringSet, newFiles misc.StringSet, required bool) error {
|
||||
for file := range newFiles {
|
||||
err := getFile(files, file, required)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getFile records file in the files set, along with the shared-library
// dependencies of any ELF binary. Glob patterns are expanded and
// directories are walked recursively. With required set, a missing
// file is an error; otherwise it is silently skipped.
func getFile(files misc.StringSet, file string, required bool) error {
	// Expand glob expression
	expanded, _ := filepath.Glob(file)
	if len(expanded) > 0 && expanded[0] != file {
		// 'file' was a pattern: recurse into each match instead.
		for _, path := range expanded {
			if err := getFile(files, path, required); err != nil {
				return err
			}
		}
		return nil
	}

	fileInfo, err := os.Stat(file)
	if err != nil {
		if required {
			return errors.New("getFile: File does not exist :" + file)
		}
		return nil
	}

	if fileInfo.IsDir() {
		// Recurse over directory contents
		err := filepath.Walk(file, func(path string, f os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if f.IsDir() {
				return nil
			}
			return getFile(files, path, required)
		})
		if err != nil {
			return err
		}
		return nil
	}

	// false marks the file as not yet written to the archive.
	files[file] = false

	// get dependencies for binaries
	if _, err := elf.Open(file); err != nil {
		// file is not an elf, so don't resolve lib dependencies
		return nil
	}

	if err := getBinaryDeps(files, file); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func getOskConfFontPath(oskConfPath string) (string, error) {
|
||||
var path string
|
||||
f, err := os.Open(oskConfPath)
|
||||
if err != nil {
|
||||
return path, err
|
||||
}
|
||||
defer f.Close()
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
// "key = val" is 3 fields
|
||||
if len(fields) > 2 && fields[0] == "keyboard-font" {
|
||||
path = fields[2]
|
||||
}
|
||||
}
|
||||
if !exists(path) {
|
||||
return path, errors.New("Unable to find font: " + path)
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// Get a list of files and their dependencies related to supporting rootfs full
// disk (d)encryption
// Collected: osk-sdl (on-screen keyboard) and cryptsetup plus their
// config files, the configured osk font, DirectFB and tslib plugin
// libraries, and — when deviceinfo names a mesa driver — mesa/EGL
// libraries for hardware acceleration.
func getFdeFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
	confFiles := misc.StringSet{
		"/etc/osk.conf":   false,
		"/etc/ts.conf":    false,
		"/etc/pointercal": false,
		"/etc/fb.modes":   false,
		"/etc/directfbrc": false,
	}
	// TODO: this shouldn't be false? though some files (pointercal) don't always exist...
	if err := getFiles(files, confFiles, false); err != nil {
		return err
	}

	// osk-sdl
	oskFiles := misc.StringSet{
		"/usr/bin/osk-sdl":    false,
		"/sbin/cryptsetup":    false,
		"/usr/lib/libGL.so.1": false}
	if err := getFiles(files, oskFiles, true); err != nil {
		return err
	}

	fontFile, err := getOskConfFontPath("/etc/osk.conf")
	if err != nil {
		return err
	}
	files[fontFile] = false

	// Directfb
	dfbFiles := make(misc.StringSet)
	err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
		if filepath.Ext(path) == ".so" {
			dfbFiles[path] = false
		}
		return nil
	})
	if err != nil {
		// NOTE(review): message mentions getBinaryDeps — looks
		// copy-pasted from that function; confirm and reword.
		log.Print("getBinaryDeps: failed to stat file")
		return err
	}
	if err := getFiles(files, dfbFiles, true); err != nil {
		return err
	}

	// tslib
	tslibFiles := make(misc.StringSet)
	err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
		if filepath.Ext(path) == ".so" {
			tslibFiles[path] = false
		}
		return nil
	})
	if err != nil {
		// NOTE(review): same copy-pasted message as above.
		log.Print("getBinaryDeps: failed to stat file")
		return err
	}
	libts, _ := filepath.Glob("/usr/lib/libts*")
	for _, file := range libts {
		tslibFiles[file] = false
	}
	if err = getFiles(files, tslibFiles, true); err != nil {
		return err
	}

	// mesa hw accel
	if devinfo.MesaDriver != "" {
		mesaFiles := misc.StringSet{
			"/usr/lib/libEGL.so.1":    false,
			"/usr/lib/libGLESv2.so.2": false,
			"/usr/lib/libgbm.so.1":    false,
			"/usr/lib/libudev.so.1":   false,
			"/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so": false,
		}
		if err := getFiles(files, mesaFiles, true); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func getHookScripts(files misc.StringSet) {
|
||||
scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh")
|
||||
for _, script := range scripts {
|
||||
files[script] = false
|
||||
}
|
||||
}
|
||||
|
||||
// getInitfsExtraFiles collects the contents of the initramfs-extra
// archive: additional filesystem/partition tools plus, when osk-sdl is
// installed, full-disk-encryption support files.
func getInitfsExtraFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
	log.Println("== Generating initramfs extra ==")
	binariesExtra := misc.StringSet{
		"/lib/libz.so.1":        false,
		"/sbin/btrfs":           false,
		"/sbin/dmsetup":         false,
		"/sbin/e2fsck":          false,
		"/usr/sbin/parted":      false,
		"/usr/sbin/resize2fs":   false,
		"/usr/sbin/resize.f2fs": false,
	}
	log.Println("- Including extra binaries")
	if err := getFiles(files, binariesExtra, true); err != nil {
		return err
	}

	// FDE support is included only when the osk-sdl keyboard is present.
	if exists("/usr/bin/osk-sdl") {
		log.Println("- Including FDE support")
		if err := getFdeFiles(files, devinfo); err != nil {
			return err
		}
	} else {
		log.Println("- *NOT* including FDE support")
	}

	return nil
}
|
||||
|
||||
// getInitfsFiles collects the baseline contents of the main initramfs:
// busybox and required binaries, deviceinfo, hook file lists under
// /etc/postmarketos-mkinitfs/files, and hook scripts.
// NOTE(review): the devinfo parameter is currently unused here.
func getInitfsFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
	log.Println("== Generating initramfs ==")
	requiredFiles := misc.StringSet{
		"/bin/busybox":        false,
		"/bin/sh":             false,
		"/bin/busybox-extras": false,
		"/usr/sbin/telnetd":   false,
		"/sbin/kpartx":        false,
		"/etc/deviceinfo":     false,
		"/usr/bin/unudhcpd":   false,
	}

	// Hook files & scripts
	if exists("/etc/postmarketos-mkinitfs/files") {
		log.Println("- Including hook files")
		hookFiles := getHookFiles("/etc/postmarketos-mkinitfs/files")
		if err := getFiles(files, hookFiles, true); err != nil {
			return err
		}
	}
	log.Println("- Including hook scripts")
	getHookScripts(files)

	log.Println("- Including required binaries")
	if err := getFiles(files, requiredFiles, true); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// getInitfsModules collects kernel modules for the initramfs:
// modprobe metadata (modules.*), a built-in list of required
// modules/directories, modules named in deviceinfo, and any listed in
// /etc/postmarketos-mkinitfs/modules/*.modules. A missing module
// directory is not an error (kernel may be built without modules).
func getInitfsModules(files misc.StringSet, devinfo deviceinfo.DeviceInfo, kernelVer string) error {
	log.Println("- Including kernel modules")

	modDir := filepath.Join("/lib/modules", kernelVer)
	if !exists(modDir) {
		// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
		log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
		return nil
	}

	// modules.* required by modprobe
	modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
	for _, file := range modprobeFiles {
		files[file] = false
	}

	// module name (without extension), or directory (trailing slash is important! globs OK)
	requiredModules := []string{
		"loop",
		"dm-crypt",
		"kernel/fs/overlayfs/",
		"kernel/crypto/",
		"kernel/arch/*/crypto/",
	}

	for _, item := range requiredModules {
		dir, file := filepath.Split(item)
		if file == "" {
			// item is a directory
			dir = filepath.Join(modDir, dir)
			dirs, _ := filepath.Glob(dir)
			for _, d := range dirs {
				if err := getModulesInDir(files, d); err != nil {
					log.Print("Unable to get modules in dir: ", d)
					return err
				}
			}
		} else if dir == "" {
			// item is a module name
			if err := getModule(files, file, modDir); err != nil {
				log.Print("Unable to get module: ", file)
				return err
			}
		} else {
			// Entry has both a dir and file component; not supported.
			// Note: logged but not treated as an error.
			log.Printf("Unknown module entry: %q", item)
		}
	}

	// deviceinfo modules
	for _, module := range strings.Fields(devinfo.ModulesInitfs) {
		if err := getModule(files, module, modDir); err != nil {
			log.Print("Unable to get modules from deviceinfo")
			return err
		}
	}

	// /etc/postmarketos-mkinitfs/modules/*.modules
	initfsModFiles, _ := filepath.Glob("/etc/postmarketos-mkinitfs/modules/*.modules")
	for _, modFile := range initfsModFiles {
		f, err := os.Open(modFile)
		if err != nil {
			log.Print("getInitfsModules: unable to open mkinitfs modules file: ", modFile)
			return err
		}
		// NOTE(review): defer inside a loop — all files stay open until
		// the function returns. Harmless for a handful of files, but
		// consider closing per-iteration. Scanner errors (s.Err) are
		// also not checked here.
		defer f.Close()
		s := bufio.NewScanner(f)
		for s.Scan() {
			if err := getModule(files, s.Text(), modDir); err != nil {
				log.Print("getInitfsModules: unable to get module file: ", s.Text())
				return err
			}
		}
	}

	return nil
}
|
||||
|
||||
// getKernelReleaseFile returns the path to the kernel.release file of
// the one installed kernel flavor; anything other than exactly one
// flavor under /usr/share/kernel is an error.
func getKernelReleaseFile() (string, error) {
	// Glob can only fail on a malformed pattern, so its error is ignored.
	files, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
	// only one kernel flavor supported
	switch len(files) {
	case 1:
		return files[0], nil
	default:
		return "", fmt.Errorf("only one kernel release/flavor is supported, found: %q", files)
	}
}
|
||||
|
||||
func getKernelVersion() (string, error) {
|
||||
var version string
|
||||
|
||||
releaseFile, err := getKernelReleaseFile()
|
||||
if err != nil {
|
||||
return version, err
|
||||
}
|
||||
|
||||
contents, err := os.ReadFile(releaseFile)
|
||||
if err != nil {
|
||||
return version, err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(contents)), nil
|
||||
}
|
||||
|
||||
func Copy(srcFile, dstFile string) error {
|
||||
out, err := os.Create(dstFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer out.Close()
|
||||
|
||||
in, err := os.Open(srcFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyUbootFiles copies all u-boot artifacts for the device's board
// (from /usr/share/u-boot/<boardname>) into path. A device without
// UbootBoardname set is a no-op. A missing source directory surfaces
// as an os.ErrNotExist error, which the caller treats as non-fatal.
func copyUbootFiles(path string, devinfo deviceinfo.DeviceInfo) error {
	if devinfo.UbootBoardname == "" {
		return nil
	}

	srcDir := filepath.Join("/usr/share/u-boot", devinfo.UbootBoardname)
	entries, err := ioutil.ReadDir(srcDir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		sourcePath := filepath.Join(srcDir, entry.Name())
		destPath := filepath.Join(path, entry.Name())

		if err := Copy(sourcePath, destPath); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// generateInitfs builds the main initramfs archive named name under
// path: required directory skeleton, baseline files, kernel modules,
// the init scripts, and splash images, then writes the compressed
// archive (mode 0644).
func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
	initfsArchive, err := archive.New()
	if err != nil {
		return err
	}

	requiredDirs := []string{
		"/bin", "/sbin", "/usr/bin", "/usr/sbin", "/proc", "/sys",
		"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
	}
	for _, dir := range requiredDirs {
		initfsArchive.Dirs[dir] = false
	}

	if err := getInitfsFiles(initfsArchive.Files, devinfo); err != nil {
		return err
	}

	if err := getInitfsModules(initfsArchive.Files, devinfo, kernVer); err != nil {
		return err
	}

	// The init script is installed as /init inside the archive.
	if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
		return err
	}

	// splash images
	log.Println("- Including splash images")
	splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz")
	for _, file := range splashFiles {
		// splash images are expected at /<file>
		if err := initfsArchive.AddFile(file, filepath.Join("/", filepath.Base(file))); err != nil {
			return err
		}
	}

	// initfs_functions
	if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
		return err
	}

	log.Println("- Writing and verifying initramfs archive")
	if err := initfsArchive.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// generateInitfsExtra builds the initramfs-extra archive named name
// under path, containing files not needed on every boot (extra tools,
// FDE support), and writes it compressed (mode 0644).
func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo) error {
	initfsExtraArchive, err := archive.New()
	if err != nil {
		return err
	}

	if err := getInitfsExtraFiles(initfsExtraArchive.Files, devinfo); err != nil {
		return err
	}

	log.Println("- Writing and verifying initramfs-extra archive")
	if err := initfsExtraArchive.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// stripExts returns file with everything from the first '.' onward
// removed (e.g. "foo.ko.xz" -> "foo"). Note the cut happens at the
// first dot anywhere in the string, not just within the basename.
func stripExts(file string) string {
	if i := strings.IndexByte(file, '.'); i >= 0 {
		return file[:i]
	}
	return file
}
|
||||
|
||||
func getModulesInDir(files misc.StringSet, modPath string) error {
|
||||
err := filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
|
||||
// TODO: need to support more extensions?
|
||||
if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
|
||||
return nil
|
||||
}
|
||||
files[path] = false
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
|
||||
// file and all of its dependencies.
|
||||
// Note: it's not necessarily fatal if the module is not found, since it may
|
||||
// have been built into the kernel
|
||||
// TODO: look for it in modules.builtin, and make it fatal if it can't be found
|
||||
// anywhere
|
||||
func getModule(files misc.StringSet, modName string, modDir string) error {
|
||||
|
||||
modDep := filepath.Join(modDir, "modules.dep")
|
||||
if !exists(modDep) {
|
||||
log.Fatal("Kernel module.dep not found: ", modDir)
|
||||
}
|
||||
|
||||
fd, err := os.Open(modDep)
|
||||
if err != nil {
|
||||
log.Print("Unable to open modules.dep: ", modDep)
|
||||
return err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
deps, err := getModuleDeps(modName, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, dep := range deps {
|
||||
p := filepath.Join(modDir, dep)
|
||||
if !exists(p) {
|
||||
log.Print(fmt.Sprintf("Tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p))
|
||||
return err
|
||||
}
|
||||
files[p] = false
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the canonicalized name for the module as represented in the given modules.dep io.reader
|
||||
func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
|
||||
var deps []string
|
||||
|
||||
// split the module name on - and/or _, build a regex for matching
|
||||
splitRe := regexp.MustCompile("[-_]+")
|
||||
modNameReStr := splitRe.ReplaceAllString(modName, "[-_]+")
|
||||
re := regexp.MustCompile("^" + modNameReStr + "$")
|
||||
|
||||
s := bufio.NewScanner(modulesDep)
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
if len(fields) == 0 {
|
||||
continue
|
||||
}
|
||||
fields[0] = strings.TrimSuffix(fields[0], ":")
|
||||
|
||||
found := re.FindAll([]byte(filepath.Base(stripExts(fields[0]))), -1)
|
||||
if len(found) > 0 {
|
||||
deps = append(deps, fields...)
|
||||
break
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
log.Print("Unable to get module + dependencies: ", modName)
|
||||
return deps, err
|
||||
}
|
||||
|
||||
return deps, nil
|
||||
}
|
@@ -1,225 +0,0 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"github.com/cavaliercoder/go-cpio"
|
||||
"github.com/klauspost/pgzip"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Archive accumulates directories and files destined for a cpio
// archive. The boolean value in each set tracks whether the entry has
// already been written to the cpio stream.
type Archive struct {
	Dirs       misc.StringSet // directory entries to create in the archive
	Files      misc.StringSet // source file paths to include
	cpioWriter *cpio.Writer   // writes entries into buf
	buf        *bytes.Buffer  // in-memory staging area for the cpio stream
}
|
||||
|
||||
func New() (*Archive, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
archive := &Archive{
|
||||
cpioWriter: cpio.NewWriter(buf),
|
||||
Files: make(misc.StringSet),
|
||||
Dirs: make(misc.StringSet),
|
||||
buf: buf,
|
||||
}
|
||||
|
||||
return archive, nil
|
||||
}
|
||||
|
||||
// Write finalizes the staged cpio stream and writes the compressed
// archive to path with the given file mode.
func (archive *Archive) Write(path string, mode os.FileMode) error {
	if err := archive.writeCpio(); err != nil {
		return err
	}

	if err := archive.cpioWriter.Close(); err != nil {
		return err
	}

	// Write archive to path
	if err := archive.writeCompressed(path, mode); err != nil {
		log.Print("Unable to write archive to location: ", path)
		return err
	}

	// NOTE(review): writeCompressed already chmods the output, so this
	// second chmod appears redundant (but harmless).
	if err := os.Chmod(path, mode); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// AddFile writes file into the cpio archive at dest, creating parent
// directory entries as needed. Symlinks are written as symlink entries
// and their (resolved) targets are then added recursively. Files
// already marked as written are skipped.
func (archive *Archive) AddFile(file string, dest string) error {
	if err := archive.addDir(filepath.Dir(dest)); err != nil {
		return err
	}

	if archive.Files[file] {
		// Already written to cpio
		return nil
	}

	// Lstat so symlinks are detected rather than followed.
	fileStat, err := os.Lstat(file)
	if err != nil {
		log.Print("AddFile: failed to stat file: ", file)
		return err
	}

	// Symlink: write symlink to archive then set 'file' to link target
	if fileStat.Mode()&os.ModeSymlink != 0 {
		// log.Printf("File %q is a symlink", file)
		target, err := os.Readlink(file)
		if err != nil {
			log.Print("AddFile: failed to get symlink target: ", file)
			return err
		}

		// cpio entry names are relative (no leading '/').
		destFilename := strings.TrimPrefix(dest, "/")
		hdr := &cpio.Header{
			Name:     destFilename,
			Linkname: target,
			Mode:     0644 | cpio.ModeSymlink,
			Size:     int64(len(target)),
			// Checksum: 1,
		}
		if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
			return err
		}
		if _, err = archive.cpioWriter.Write([]byte(target)); err != nil {
			return err
		}

		archive.Files[file] = true
		// Bare target (same dir as the link): anchor it at the link's
		// directory before resolving.
		if filepath.Dir(target) == "." {
			target = filepath.Join(filepath.Dir(file), target)
		}
		// make sure target is an absolute path
		if !filepath.IsAbs(target) {
			target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
			if err != nil {
				return err
			}
		}
		// TODO: add verbose mode, print stuff like this:
		// log.Printf("symlink: %q, target: %q", file, target)
		// write symlink target
		err = archive.AddFile(target, target)
		return err
	}

	// log.Printf("writing file: %q", file)

	fd, err := os.Open(file)
	if err != nil {
		return err
	}
	defer fd.Close()

	destFilename := strings.TrimPrefix(dest, "/")
	hdr := &cpio.Header{
		Name: destFilename,
		Mode: cpio.FileMode(fileStat.Mode().Perm()),
		Size: fileStat.Size(),
		// Checksum: 1,
	}
	if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
		return err
	}

	if _, err = io.Copy(archive.cpioWriter, fd); err != nil {
		return err
	}

	// Mark as written so repeat adds are no-ops.
	archive.Files[file] = true

	return nil
}
|
||||
|
||||
func (archive *Archive) writeCompressed(path string, mode os.FileMode) error {
|
||||
// TODO: support other compression formats, based on deviceinfo
|
||||
fd, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gz, err := pgzip.NewWriterLevel(fd, flate.BestSpeed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = io.Copy(gz, archive.buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := gz.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// call fsync just to be sure
|
||||
if err := fd.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) writeCpio() error {
|
||||
// Write any dirs added explicitly
|
||||
for dir := range archive.Dirs {
|
||||
archive.addDir(dir)
|
||||
}
|
||||
|
||||
// Write files and any missing parent dirs
|
||||
for file, imported := range archive.Files {
|
||||
if imported {
|
||||
continue
|
||||
}
|
||||
if err := archive.AddFile(file, file); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addDir writes a cpio directory entry for dir, emitting each ancestor
// component (mode 0755) that hasn't been written yet. Written dirs are
// recorded in archive.Dirs so each is emitted only once.
func (archive *Archive) addDir(dir string) error {
	if archive.Dirs[dir] {
		// Already imported
		return nil
	}
	// cpio represents the archive root as ".".
	if dir == "/" {
		dir = "."
	}

	subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
	for i, subdir := range subdirs {
		// Re-join the path up to and including this component.
		path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
		if archive.Dirs[path] {
			// Subdir already imported
			continue
		}
		err := archive.cpioWriter.WriteHeader(&cpio.Header{
			Name: path,
			Mode: cpio.ModeDir | 0755,
		})
		if err != nil {
			return err
		}
		archive.Dirs[path] = true
		// log.Print("wrote dir: ", path)
	}

	return nil
}
|
@@ -4,107 +4,82 @@
|
||||
package deviceinfo
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mvdan/sh/shell"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
||||
)
|
||||
|
||||
type DeviceInfo struct {
|
||||
AppendDtb string
|
||||
Arch string
|
||||
UbootBoardname string
|
||||
BootimgAppendSEAndroidEnforce string
|
||||
BootimgBlobpack string
|
||||
BootimgDtbSecond string
|
||||
BootimgMtkMkimage string
|
||||
BootimgPxa string
|
||||
BootimgQcdt string
|
||||
Dtb string
|
||||
FlashKernelOnUpdate string
|
||||
FlashOffsetBase string
|
||||
FlashOffsetKernel string
|
||||
FlashOffsetRamdisk string
|
||||
FlashOffsetSecond string
|
||||
FlashOffsetTags string
|
||||
FlashPagesize string
|
||||
GenerateBootimg string
|
||||
GenerateLegacyUbootInitfs string
|
||||
InitfsCompression string
|
||||
KernelCmdline string
|
||||
LegacyUbootLoadAddress string
|
||||
MesaDriver string
|
||||
MkinitfsPostprocess string
|
||||
ModulesInitfs string
|
||||
InitfsCompression string
|
||||
InitfsExtraCompression string
|
||||
UbootBoardname string
|
||||
GenerateSystemdBoot string
|
||||
FormatVersion string
|
||||
CreateInitfsExtra bool
|
||||
}
|
||||
|
||||
func ReadDeviceinfo(file string) (DeviceInfo, error) {
|
||||
var deviceinfo DeviceInfo
|
||||
|
||||
fd, err := os.Open(file)
|
||||
if err != nil {
|
||||
return deviceinfo, err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
if err := unmarshal(fd, &deviceinfo); err != nil {
|
||||
return deviceinfo, err
|
||||
// Reads the relevant entries from "file" into DeviceInfo struct
|
||||
// Any already-set entries will be overwriten if they are present
|
||||
// in "file"
|
||||
func (d *DeviceInfo) ReadDeviceinfo(file string) error {
|
||||
if exists, err := misc.Exists(file); !exists {
|
||||
return fmt.Errorf("%q not found, required by mkinitfs", file)
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("unexpected error getting status for %q: %s", file, err)
|
||||
}
|
||||
|
||||
return deviceinfo, nil
|
||||
if err := d.unmarshal(file); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmarshals a deviceinfo into a DeviceInfo struct
|
||||
func unmarshal(r io.Reader, devinfo *DeviceInfo) error {
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
if strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
func (d *DeviceInfo) unmarshal(file string) error {
|
||||
ctx, cancelCtx := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
|
||||
defer cancelCtx()
|
||||
vars, err := shell.SourceFile(ctx, file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing deviceinfo %q failed: %w", file, err)
|
||||
}
|
||||
|
||||
// line isn't setting anything, so just ignore it
|
||||
if !strings.Contains(line, "=") {
|
||||
continue
|
||||
}
|
||||
|
||||
// sometimes line has a comment at the end after setting an option
|
||||
line = strings.SplitN(line, "#", 2)[0]
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
// must support having '=' in the value (e.g. kernel cmdline)
|
||||
parts := strings.SplitN(line, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
|
||||
}
|
||||
|
||||
name, val := parts[0], parts[1]
|
||||
val = strings.ReplaceAll(val, "\"", "")
|
||||
|
||||
if name == "deviceinfo_format_version" && val != "0" {
|
||||
return fmt.Errorf("deviceinfo format version %q is not supported", val)
|
||||
}
|
||||
|
||||
fieldName := nameToField(name)
|
||||
|
||||
if fieldName == "" {
|
||||
return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
|
||||
}
|
||||
|
||||
field := reflect.ValueOf(devinfo).Elem().FieldByName(fieldName)
|
||||
for k, v := range vars {
|
||||
fieldName := nameToField(k)
|
||||
field := reflect.ValueOf(d).Elem().FieldByName(fieldName)
|
||||
if !field.IsValid() {
|
||||
// an option that meets the deviceinfo "specification", but isn't
|
||||
// one we care about in this module
|
||||
continue
|
||||
}
|
||||
field.SetString(val)
|
||||
switch field.Interface().(type) {
|
||||
case string:
|
||||
field.SetString(v.String())
|
||||
case bool:
|
||||
if v, err := strconv.ParseBool(v.String()); err != nil {
|
||||
return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'bool'", file, k)
|
||||
} else {
|
||||
field.SetBool(v)
|
||||
}
|
||||
case int:
|
||||
if v, err := strconv.ParseInt(v.String(), 10, 32); err != nil {
|
||||
return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'int'", file, k)
|
||||
} else {
|
||||
field.SetInt(v)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("deviceinfo %q has unsupported type for field %q", file, k)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
log.Print("unable to parse deviceinfo: ", err)
|
||||
return err
|
||||
|
||||
if d.FormatVersion != "0" {
|
||||
return fmt.Errorf("deviceinfo %q has an unsupported format version %q", file, d.FormatVersion)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -120,8 +95,33 @@ func nameToField(name string) string {
|
||||
if p == "deviceinfo" {
|
||||
continue
|
||||
}
|
||||
field = field + strings.Title(p)
|
||||
if len(p) < 1 {
|
||||
continue
|
||||
}
|
||||
field = field + strings.ToUpper(p[:1]) + p[1:]
|
||||
}
|
||||
|
||||
return field
|
||||
}
|
||||
|
||||
func (d DeviceInfo) String() string {
|
||||
return fmt.Sprintf(`{
|
||||
%s: %v
|
||||
%s: %v
|
||||
%s: %v
|
||||
%s: %v
|
||||
%s: %v
|
||||
%s: %v
|
||||
%s: %v
|
||||
%s: %v
|
||||
}`,
|
||||
"deviceinfo_format_version", d.FormatVersion,
|
||||
"deviceinfo_", d.FormatVersion,
|
||||
"deviceinfo_initfs_compression", d.InitfsCompression,
|
||||
"deviceinfo_initfs_extra_compression", d.InitfsCompression,
|
||||
"deviceinfo_ubootBoardname", d.UbootBoardname,
|
||||
"deviceinfo_generateSystemdBoot", d.GenerateSystemdBoot,
|
||||
"deviceinfo_formatVersion", d.FormatVersion,
|
||||
"deviceinfo_createInitfsExtra", d.CreateInitfsExtra,
|
||||
)
|
||||
}
|
||||
|
@@ -4,12 +4,32 @@
|
||||
package deviceinfo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test ReadDeviceinfo and the logic of reading from multiple files
|
||||
func TestReadDeviceinfo(t *testing.T) {
|
||||
compression_expected := "gz -9"
|
||||
|
||||
var devinfo DeviceInfo
|
||||
err := devinfo.ReadDeviceinfo("./test_resources/deviceinfo-missing")
|
||||
if !strings.Contains(err.Error(), "required by mkinitfs") {
|
||||
t.Errorf("received an unexpected err: %s", err)
|
||||
}
|
||||
err = devinfo.ReadDeviceinfo("./test_resources/deviceinfo-first")
|
||||
if err != nil {
|
||||
t.Errorf("received an unexpected err: %s", err)
|
||||
}
|
||||
err = devinfo.ReadDeviceinfo("./test_resources/deviceinfo-msm")
|
||||
if err != nil {
|
||||
t.Errorf("received an unexpected err: %s", err)
|
||||
}
|
||||
if devinfo.InitfsCompression != compression_expected {
|
||||
t.Errorf("expected %q, got: %q", compression_expected, devinfo.InitfsCompression)
|
||||
}
|
||||
}
|
||||
|
||||
// Test conversion of name to DeviceInfo struct field format
|
||||
func TestNameToField(t *testing.T) {
|
||||
tables := []struct {
|
||||
@@ -18,9 +38,11 @@ func TestNameToField(t *testing.T) {
|
||||
}{
|
||||
{"deviceinfo_dtb", "Dtb"},
|
||||
{"dtb", "Dtb"},
|
||||
{"deviceinfo_modules_initfs", "ModulesInitfs"},
|
||||
{"deviceinfo_initfs_compression", "InitfsCompression"},
|
||||
{"modules_initfs", "ModulesInitfs"},
|
||||
{"deviceinfo_modules_initfs___", "ModulesInitfs"},
|
||||
{"deviceinfo_initfs_compression___", "InitfsCompression"},
|
||||
{"deviceinfo_initfs_extra_compression", "InitfsExtraCompression"},
|
||||
{"deviceinfo_create_initfs_extra", "CreateInitfsExtra"},
|
||||
}
|
||||
|
||||
for _, table := range tables {
|
||||
@@ -36,45 +58,25 @@ func TestUnmarshal(t *testing.T) {
|
||||
tables := []struct {
|
||||
// field is just used for reflection within the test, so it must be a
|
||||
// valid DeviceInfo field
|
||||
field string
|
||||
in string
|
||||
expected string
|
||||
file string
|
||||
expected DeviceInfo
|
||||
}{
|
||||
{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"\n", "panfrost foo bar bazz"},
|
||||
{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"", "panfrost foo bar bazz"},
|
||||
// line with multiple '='
|
||||
{"KernelCmdline",
|
||||
"deviceinfo_kernel_cmdline=\"PMOS_NO_OUTPUT_REDIRECT fw_devlink=off nvme_core.default_ps_max_latency_us=5500 pcie_aspm.policy=performance\"\n",
|
||||
"PMOS_NO_OUTPUT_REDIRECT fw_devlink=off nvme_core.default_ps_max_latency_us=5500 pcie_aspm.policy=performance"},
|
||||
// empty option
|
||||
{"ModulesInitfs", "deviceinfo_modules_initfs=\"\"\n", ""},
|
||||
{"Dtb", "deviceinfo_dtb=\"freescale/imx8mq-librem5-r2 freescale/imx8mq-librem5-r3 freescale/imx8mq-librem5-r4\"\n",
|
||||
"freescale/imx8mq-librem5-r2 freescale/imx8mq-librem5-r3 freescale/imx8mq-librem5-r4"},
|
||||
// valid deviceinfo line, just not used in this module
|
||||
{"", "deviceinfo_codename=\"pine64-pinebookpro\"", ""},
|
||||
// line with comment at the end
|
||||
{"MesaDriver", "deviceinfo_mesa_driver=\"panfrost\" # this is a nice driver", "panfrost"},
|
||||
{"", "# this is a comment!\n", ""},
|
||||
// empty lines are fine
|
||||
{"", "", ""},
|
||||
// line with whitepace characters only
|
||||
{"", " \t \n\r", ""},
|
||||
{"./test_resources/deviceinfo-unmarshal-1", DeviceInfo{
|
||||
FormatVersion: "0",
|
||||
UbootBoardname: "foobar-bazz",
|
||||
InitfsCompression: "zstd:--foo=1 -T0 --bar=bazz",
|
||||
InitfsExtraCompression: "",
|
||||
CreateInitfsExtra: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
var d DeviceInfo
|
||||
for _, table := range tables {
|
||||
testName := fmt.Sprintf("unmarshal::'%s':", strings.ReplaceAll(table.in, "\n", "\\n"))
|
||||
if err := unmarshal(strings.NewReader(table.in), &d); err != nil {
|
||||
t.Errorf("%s received an unexpected err: ", err)
|
||||
if err := d.unmarshal(table.file); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// Check against expected value
|
||||
field := reflect.ValueOf(&d).Elem().FieldByName(table.field)
|
||||
out := ""
|
||||
if table.field != "" {
|
||||
out = field.String()
|
||||
}
|
||||
if out != table.expected {
|
||||
t.Errorf("%s expected: %q, got: %q", testName, table.expected, out)
|
||||
if d != table.expected {
|
||||
t.Errorf("expected: %s, got: %s", table.expected, d)
|
||||
}
|
||||
}
|
||||
|
||||
|
3
pkgs/deviceinfo/test_resources/deviceinfo-first
Normal file
3
pkgs/deviceinfo/test_resources/deviceinfo-first
Normal file
@@ -0,0 +1,3 @@
|
||||
deviceinfo_format_version="0"
|
||||
deviceinfo_initfs_compression="gz -9"
|
||||
deviceinfo_mesa_driver="panfrost"
|
2
pkgs/deviceinfo/test_resources/deviceinfo-msm
Normal file
2
pkgs/deviceinfo/test_resources/deviceinfo-msm
Normal file
@@ -0,0 +1,2 @@
|
||||
deviceinfo_format_version="0"
|
||||
deviceinfo_mesa_driver="msm"
|
7
pkgs/deviceinfo/test_resources/deviceinfo-unmarshal-1
Normal file
7
pkgs/deviceinfo/test_resources/deviceinfo-unmarshal-1
Normal file
@@ -0,0 +1,7 @@
|
||||
deviceinfo_format_version="0"
|
||||
deviceinfo_uboot_boardname="foobar-bazz"
|
||||
# line with multiple =
|
||||
deviceinfo_initfs_compression="zstd:--foo=1 -T0 --bar=bazz"
|
||||
# empty option
|
||||
deviceinfo_initfs_extra_compression=""
|
||||
deviceinfo_create_initfs_extra="true" # in-line comment that should be ignored
|
@@ -1,50 +0,0 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package misc
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
type StringSet map[string]bool
|
||||
|
||||
// Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
|
||||
// absolute path
|
||||
func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
|
||||
var path string
|
||||
|
||||
oldWd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Print("Unable to get current working dir")
|
||||
return path, err
|
||||
}
|
||||
|
||||
if err := os.Chdir(dir); err != nil {
|
||||
log.Print("Unable to change to working dir: ", dir)
|
||||
return path, err
|
||||
}
|
||||
|
||||
path, err = filepath.Abs(symPath)
|
||||
if err != nil {
|
||||
log.Print("Unable to resolve abs path to: ", symPath)
|
||||
return path, err
|
||||
}
|
||||
|
||||
if err := os.Chdir(oldWd); err != nil {
|
||||
log.Print("Unable to change to old working dir")
|
||||
return path, err
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
func FreeSpace(path string) (uint64, error) {
|
||||
var stat unix.Statfs_t
|
||||
unix.Statfs(path, &stat)
|
||||
size := stat.Bavail * uint64(stat.Bsize)
|
||||
return size, nil
|
||||
}
|
Reference in New Issue
Block a user