Compare commits
1 Commits
jane400/ve
...
1.0.x
Author | SHA1 | Date | |
---|---|---|---|
|
8f53926fb5 |
6
.gitignore
vendored
6
.gitignore
vendored
@@ -1,5 +1 @@
|
|||||||
/*.1
|
/postmarketos-mkinitfs
|
||||||
/*.tar.gz
|
|
||||||
/*.sha512
|
|
||||||
/mkinitfs
|
|
||||||
/vendor
|
|
||||||
|
@@ -1,17 +1,11 @@
|
|||||||
---
|
---
|
||||||
|
|
||||||
# global settings
|
# global settings
|
||||||
image: alpine:edge
|
image: alpine:latest
|
||||||
|
|
||||||
variables:
|
|
||||||
GOFLAGS: "-buildvcs=false"
|
|
||||||
PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/mkinitfs-vendor-${CI_COMMIT_TAG}/${CI_COMMIT_TAG}"
|
|
||||||
|
|
||||||
stages:
|
stages:
|
||||||
- lint
|
- lint
|
||||||
- build
|
- build
|
||||||
- vendor
|
|
||||||
- release
|
|
||||||
|
|
||||||
# defaults for "only"
|
# defaults for "only"
|
||||||
# We need to run the CI jobs in a "merge request specific context", if CI is
|
# We need to run the CI jobs in a "merge request specific context", if CI is
|
||||||
@@ -27,37 +21,23 @@ stages:
|
|||||||
- merge_requests
|
- merge_requests
|
||||||
- tags
|
- tags
|
||||||
|
|
||||||
|
# device documentation
|
||||||
|
gofmt linting:
|
||||||
|
stage: lint
|
||||||
|
allow_failure: true
|
||||||
|
<<: *only-default
|
||||||
|
before_script:
|
||||||
|
- apk -q add go
|
||||||
|
script:
|
||||||
|
- .gitlab-ci/check_gofmt.sh
|
||||||
|
|
||||||
build:
|
build:
|
||||||
stage: build
|
stage: build
|
||||||
<<: *only-default
|
<<: *only-default
|
||||||
before_script:
|
before_script:
|
||||||
- apk -q add go staticcheck make scdoc
|
- apk -q add go
|
||||||
script:
|
script:
|
||||||
- make test
|
- go build -v
|
||||||
- make
|
- go test ./...
|
||||||
artifacts:
|
artifacts:
|
||||||
expire_in: 1 week
|
expire_in: 1 week
|
||||||
|
|
||||||
vendor:
|
|
||||||
stage: vendor
|
|
||||||
image: alpine:latest
|
|
||||||
only:
|
|
||||||
- tags
|
|
||||||
before_script:
|
|
||||||
- apk -q add curl go make
|
|
||||||
script:
|
|
||||||
- |
|
|
||||||
make VERSION="${CI_COMMIT_TAG}" vendor
|
|
||||||
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file "mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz" "${PACKAGE_REGISTRY_URL}/"
|
|
||||||
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file "mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512" "${PACKAGE_REGISTRY_URL}/"
|
|
||||||
|
|
||||||
release:
|
|
||||||
stage: release
|
|
||||||
image: registry.gitlab.com/gitlab-org/release-cli:latest
|
|
||||||
only:
|
|
||||||
- tags
|
|
||||||
script:
|
|
||||||
- |
|
|
||||||
release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
|
|
||||||
--assets-link "{\"name\":\"mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz\",\"url\":\"${PACKAGE_REGISTRY_URL}/mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz\"}" \
|
|
||||||
--assets-link "{\"name\":\"mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512\",\"url\":\"${PACKAGE_REGISTRY_URL}/mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512\"}"
|
|
||||||
|
11
.gitlab-ci/check_gofmt.sh
Executable file
11
.gitlab-ci/check_gofmt.sh
Executable file
@@ -0,0 +1,11 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
files="$(gofmt -l .)"
|
||||||
|
|
||||||
|
[ -z "$files" ] && exit 0
|
||||||
|
|
||||||
|
# run gofmt to print out the diff of what needs to be changed
|
||||||
|
|
||||||
|
gofmt -d -e .
|
||||||
|
|
||||||
|
exit 1
|
74
Makefile
74
Makefile
@@ -1,74 +0,0 @@
|
|||||||
.POSIX:
|
|
||||||
.SUFFIXES: .1 .1.scd
|
|
||||||
|
|
||||||
VERSION?=$(shell git describe --tags --dirty 2>/dev/null || echo 0.0.0)
|
|
||||||
VPATH=doc
|
|
||||||
VENDORED="mkinitfs-vendor-$(VERSION)"
|
|
||||||
PREFIX?=/usr/local
|
|
||||||
BINDIR?=$(PREFIX)/sbin
|
|
||||||
MANDIR?=$(PREFIX)/share/man
|
|
||||||
SHAREDIR?=$(PREFIX)/share
|
|
||||||
GO?=go
|
|
||||||
GOFLAGS?=
|
|
||||||
LDFLAGS+=-s -w -X main.Version=$(VERSION)
|
|
||||||
RM?=rm -f
|
|
||||||
GOTEST=go test -count=1 -race
|
|
||||||
|
|
||||||
GOSRC!=find * -name '*.go'
|
|
||||||
GOSRC+=go.mod go.sum
|
|
||||||
|
|
||||||
DOCS := \
|
|
||||||
mkinitfs.1
|
|
||||||
|
|
||||||
all: mkinitfs $(DOCS)
|
|
||||||
|
|
||||||
mkinitfs: $(GOSRC)
|
|
||||||
$(GO) build $(GOFLAGS) -ldflags "$(LDFLAGS)" -o mkinitfs ./cmd/mkinitfs
|
|
||||||
|
|
||||||
.1.scd.1:
|
|
||||||
scdoc < $< > $@
|
|
||||||
|
|
||||||
doc: $(DOCS)
|
|
||||||
|
|
||||||
.PHONY: fmt
|
|
||||||
fmt:
|
|
||||||
gofmt -w .
|
|
||||||
|
|
||||||
test:
|
|
||||||
@if [ `gofmt -l . | wc -l` -ne 0 ]; then \
|
|
||||||
gofmt -d .; \
|
|
||||||
echo "ERROR: source files need reformatting with gofmt"; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
@staticcheck ./...
|
|
||||||
|
|
||||||
@$(GOTEST) ./...
|
|
||||||
|
|
||||||
clean:
|
|
||||||
$(RM) mkinitfs $(DOCS)
|
|
||||||
$(RM) $(VENDORED)*
|
|
||||||
|
|
||||||
install: $(DOCS) mkinitfs
|
|
||||||
install -Dm755 mkinitfs -t $(DESTDIR)$(BINDIR)/
|
|
||||||
install -Dm644 mkinitfs.1 -t $(DESTDIR)$(MANDIR)/man1/
|
|
||||||
|
|
||||||
.PHONY: checkinstall
|
|
||||||
checkinstall:
|
|
||||||
test -e $(DESTDIR)$(BINDIR)/mkinitfs
|
|
||||||
test -e $(DESTDIR)$(MANDIR)/man1/mkinitfs.1
|
|
||||||
|
|
||||||
RMDIR_IF_EMPTY:=sh -c '! [ -d $$0 ] || ls -1qA $$0 | grep -q . || rmdir $$0'
|
|
||||||
|
|
||||||
vendor:
|
|
||||||
go mod vendor
|
|
||||||
tar czf $(VENDORED).tar.gz vendor/
|
|
||||||
sha512sum $(VENDORED).tar.gz > $(VENDORED).tar.gz.sha512
|
|
||||||
$(RM) -rf vendor
|
|
||||||
|
|
||||||
uninstall:
|
|
||||||
$(RM) $(DESTDIR)$(BINDIR)/mkinitfs
|
|
||||||
${RMDIR_IF_EMPTY} $(DESTDIR)$(BINDIR)
|
|
||||||
$(RM) $(DESTDIR)$(MANDIR)/man1/mkinitfs.1
|
|
||||||
$(RMDIR_IF_EMPTY) $(DESTDIR)$(MANDIR)/man1
|
|
||||||
|
|
||||||
.PHONY: all clean install uninstall test vendor
|
|
48
README.md
48
README.md
@@ -1,48 +0,0 @@
|
|||||||
`mkinitfs` is a tool for generating an initramfs. It was originally designed
|
|
||||||
for postmarketOS, but a long term design goal is to be as distro-agnostic as
|
|
||||||
possible. It's capable of generating a split initramfs, in the style used by
|
|
||||||
postmarketOS, and supports running `boot-deploy` to install/finalize boot files
|
|
||||||
on a device.
|
|
||||||
|
|
||||||
## Building
|
|
||||||
|
|
||||||
Building this project requires a Go compiler/toolchain and `make`:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ make
|
|
||||||
```
|
|
||||||
|
|
||||||
To install locally:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ make install
|
|
||||||
```
|
|
||||||
|
|
||||||
Installation prefix can be set in the generally accepted way with setting
|
|
||||||
`PREFIX`:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ make PREFIX=/some/location
|
|
||||||
# make PREFIX=/some/location install
|
|
||||||
```
|
|
||||||
|
|
||||||
Other paths can be modified from the command line as well, see the top section of
|
|
||||||
the `Makefile` for more information.
|
|
||||||
|
|
||||||
Tests (functional and linting) can be executed by using the `test` make target:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ make test
|
|
||||||
```
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
The tool can be run with no options:
|
|
||||||
|
|
||||||
```
|
|
||||||
# mkinitfs
|
|
||||||
```
|
|
||||||
|
|
||||||
Configuration is done through a series of flat text files that list directories
|
|
||||||
and files, and by placing scripts in specific directories. See `man 1 mkinitfs`
|
|
||||||
for more information.
|
|
@@ -1,172 +0,0 @@
|
|||||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/bootdeploy"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookdirs"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookfiles"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookscripts"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/initramfs"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/modules"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
|
|
||||||
)
|
|
||||||
|
|
||||||
// set at build time
|
|
||||||
var Version string
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
retCode := 0
|
|
||||||
defer func() { os.Exit(retCode) }()
|
|
||||||
|
|
||||||
outDir := flag.String("d", "/boot", "Directory to output initfs(-extra) and other boot files")
|
|
||||||
|
|
||||||
var showVersion bool
|
|
||||||
flag.BoolVar(&showVersion, "version", false, "Print version and quit.")
|
|
||||||
|
|
||||||
var disableBootDeploy bool
|
|
||||||
flag.BoolVar(&disableBootDeploy, "no-bootdeploy", false, "Disable running 'boot-deploy' after generating archives.")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
if showVersion {
|
|
||||||
fmt.Printf("%s - %s\n", filepath.Base(os.Args[0]), Version)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Default().SetFlags(log.Lmicroseconds)
|
|
||||||
|
|
||||||
var devinfo deviceinfo.DeviceInfo
|
|
||||||
deverr_usr := devinfo.ReadDeviceinfo("/usr/share/deviceinfo/deviceinfo")
|
|
||||||
deverr_etc := devinfo.ReadDeviceinfo("/etc/deviceinfo")
|
|
||||||
if deverr_etc != nil && deverr_usr != nil {
|
|
||||||
log.Println("Error reading deviceinfo")
|
|
||||||
log.Println("\t/usr/share/deviceinfo/deviceinfo:", deverr_usr)
|
|
||||||
log.Println("\t/etc/deviceinfo:", deverr_etc)
|
|
||||||
retCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
defer misc.TimeFunc(time.Now(), "mkinitfs")
|
|
||||||
|
|
||||||
kernVer, err := osutil.GetKernelVersion()
|
|
||||||
if err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
retCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// temporary working dir
|
|
||||||
workDir, err := os.MkdirTemp("", "mkinitfs")
|
|
||||||
if err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
log.Println("unable to create temporary work directory")
|
|
||||||
retCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
e := os.RemoveAll(workDir)
|
|
||||||
if e != nil && err == nil {
|
|
||||||
log.Println(e)
|
|
||||||
log.Println("unable to remove temporary work directory")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
log.Print("Generating for kernel version: ", kernVer)
|
|
||||||
log.Print("Output directory: ", *outDir)
|
|
||||||
|
|
||||||
//
|
|
||||||
// initramfs
|
|
||||||
//
|
|
||||||
// deviceinfo.InitfsCompression needs a little more post-processing
|
|
||||||
compressionFormat, compressionLevel := archive.ExtractFormatLevel(devinfo.InitfsCompression)
|
|
||||||
log.Printf("== Generating %s ==\n", "initramfs")
|
|
||||||
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
|
|
||||||
|
|
||||||
start := time.Now()
|
|
||||||
initramfsAr := archive.New(compressionFormat, compressionLevel)
|
|
||||||
initfs := initramfs.New([]filelist.FileLister{
|
|
||||||
hookdirs.New("/usr/share/mkinitfs/dirs"),
|
|
||||||
hookdirs.New("/etc/mkinitfs/dirs"),
|
|
||||||
hookfiles.New("/usr/share/mkinitfs/files"),
|
|
||||||
hookfiles.New("/etc/mkinitfs/files"),
|
|
||||||
hookscripts.New("/usr/share/mkinitfs/hooks", "/hooks"),
|
|
||||||
hookscripts.New("/etc/mkinitfs/hooks", "/hooks"),
|
|
||||||
modules.New("/usr/share/mkinitfs/modules"),
|
|
||||||
modules.New("/etc/mkinitfs/modules"),
|
|
||||||
})
|
|
||||||
if err := initramfsAr.AddItems(initfs); err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
log.Println("failed to generate: ", "initramfs")
|
|
||||||
retCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err := initramfsAr.Write(filepath.Join(workDir, "initramfs"), os.FileMode(0644)); err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
log.Println("failed to generate: ", "initramfs")
|
|
||||||
retCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
misc.TimeFunc(start, "initramfs")
|
|
||||||
|
|
||||||
//
|
|
||||||
// initramfs-extra
|
|
||||||
//
|
|
||||||
// deviceinfo.InitfsExtraCompression needs a little more post-processing
|
|
||||||
compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
|
|
||||||
log.Printf("== Generating %s ==\n", "initramfs-extra")
|
|
||||||
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
|
|
||||||
|
|
||||||
start = time.Now()
|
|
||||||
initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
|
|
||||||
initfsExtra := initramfs.New([]filelist.FileLister{
|
|
||||||
hookfiles.New("/usr/share/mkinitfs/files-extra"),
|
|
||||||
hookfiles.New("/etc/mkinitfs/files-extra"),
|
|
||||||
hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
|
|
||||||
hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
|
|
||||||
modules.New("/usr/share/mkinitfs/modules-extra"),
|
|
||||||
modules.New("/etc/mkinitfs/modules-extra"),
|
|
||||||
})
|
|
||||||
if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
log.Println("failed to generate: ", "initramfs-extra")
|
|
||||||
retCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
log.Println("failed to generate: ", "initramfs-extra")
|
|
||||||
retCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
misc.TimeFunc(start, "initramfs-extra")
|
|
||||||
|
|
||||||
// Final processing of initramfs / kernel is done by boot-deploy
|
|
||||||
if !disableBootDeploy {
|
|
||||||
if err := bootDeploy(workDir, *outDir, devinfo); err != nil {
|
|
||||||
log.Println(err)
|
|
||||||
log.Println("boot-deploy failed")
|
|
||||||
retCode = 1
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func bootDeploy(workDir string, outDir string, devinfo deviceinfo.DeviceInfo) error {
|
|
||||||
log.Print("== Using boot-deploy to finalize/install files ==")
|
|
||||||
defer misc.TimeFunc(time.Now(), "boot-deploy")
|
|
||||||
|
|
||||||
bd := bootdeploy.New(workDir, outDir, devinfo)
|
|
||||||
return bd.Run()
|
|
||||||
}
|
|
@@ -1,194 +0,0 @@
|
|||||||
mkinitfs(1) "mkinitfs"
|
|
||||||
|
|
||||||
# NAME
|
|
||||||
|
|
||||||
mkinitfs
|
|
||||||
|
|
||||||
# DESCRIPTION
|
|
||||||
|
|
||||||
mkinitfs is a simple, generic tool for generating an initramfs, primarily
|
|
||||||
developed for use in postmarketOS
|
|
||||||
|
|
||||||
# CONCEPTS
|
|
||||||
|
|
||||||
mkinitfs is designed to generate two archives, "initramfs" and
|
|
||||||
"initramfs-extra", however it's possible to configure mkinitfs to run without
|
|
||||||
generating an initramfs-extra archive. mkinitfs is primarily configured through
|
|
||||||
the placement of files in specific directories detailed below in the
|
|
||||||
*DIRECTORIES* section. *deviceinfo* files are also used to provide other
|
|
||||||
configuration options to mkinitfs, these are covered under the *DEVICEINFO*
|
|
||||||
section below.
|
|
||||||
|
|
||||||
mkinitfs does not provide an init script, or any boot-time logic, it's purpose
|
|
||||||
is purely to generate the archive(s). mkinitfs does call *boot-deploy* after
|
|
||||||
creating the archive(s), in order to install/deploy them and any other relevant
|
|
||||||
boot-related items onto the system.
|
|
||||||
|
|
||||||
Design goals of this project are:
|
|
||||||
|
|
||||||
- Support as many distros as possible
|
|
||||||
- Simplify configuration, while still giving multiple opportunities to set or override defaults
|
|
||||||
- Execute an external app to do any boot install/setup finalization
|
|
||||||
- One such app is here: https://gitlab.com/postmarketOS/boot-deploy
|
|
||||||
- But implementation can be anything, see the section on *BOOT-DEPLOY*
|
|
||||||
for more info
|
|
||||||
|
|
||||||
# DEVICEINFO
|
|
||||||
|
|
||||||
The canonical deviceinfo "specification" is at
|
|
||||||
https://wiki.postmarketos.org/wiki/Deviceinfo_reference
|
|
||||||
|
|
||||||
mkinitfs reads deviceinfo values from */usr/share/deviceinfo/deviceinfo* and
|
|
||||||
*/etc/deviceinfo*, in that order. The following variables
|
|
||||||
are *required* by mkinitfs:
|
|
||||||
|
|
||||||
- deviceinfo_generate_systemd_boot
|
|
||||||
- deviceinfo_initfs_compression
|
|
||||||
- deviceinfo_initfs_extra_compression
|
|
||||||
- deviceinfo_uboot_boardname
|
|
||||||
|
|
||||||
It is a design goal to keep the number of required variables from deviceinfo to
|
|
||||||
a bare minimum, and to require only variables that don't hold lists of things.
|
|
||||||
|
|
||||||
*NOTE*: When deviceinfo_initfs_extra_compression is set, make sure that the
|
|
||||||
necessary tools to extract the configured archive format are in the initramfs
|
|
||||||
archive.
|
|
||||||
|
|
||||||
# ARCHIVE COMPRESSION
|
|
||||||
|
|
||||||
Archive compression parameters are specified in the
|
|
||||||
*deviceinfo_initfs_compression* and *deviceinfo_initfs_extra_compression*
|
|
||||||
deviceinfo variables. Their values do not have to match, but special
|
|
||||||
consideration should be taken since some formats may require additional kernel
|
|
||||||
options or tools in the initramfs to support it.
|
|
||||||
|
|
||||||
Supported compression *formats* for mkinitfs are:
|
|
||||||
|
|
||||||
- gzip
|
|
||||||
- lz4
|
|
||||||
- lzma
|
|
||||||
- none
|
|
||||||
- zstd
|
|
||||||
|
|
||||||
Supported compression *levels* for mkinitfs:
|
|
||||||
|
|
||||||
- best
|
|
||||||
- default
|
|
||||||
- fast
|
|
||||||
|
|
||||||
The value of these variables follows this syntax: *<format>:<level>*. For
|
|
||||||
example, *zstd* with the *fast* compression level would be:
|
|
||||||
*deviceinfo_initfs_compression="zstd:fast"*
|
|
||||||
|
|
||||||
Defaults to *gzip* and *default* for both archives if format and/or level is
|
|
||||||
unsupported or omitted.
|
|
||||||
|
|
||||||
|
|
||||||
# DIRECTORIES
|
|
||||||
|
|
||||||
The following directories are used by mkinitfs to generate the initramfs and
|
|
||||||
initramfs-extra archives. Directories that end in *-extra* indicate directories
|
|
||||||
that are used for constructing the initramfs-extra archive, while those without
|
|
||||||
it are for constructing the initramfs archive.
|
|
||||||
|
|
||||||
Configuration under */usr/share/mkinitfs* is intended to be managed by
|
|
||||||
distributions, while configuration under */etc/mkinitfs* is for users to
|
|
||||||
create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, and then from */etc/mkinitfs*.
|
|
||||||
|
|
||||||
## /usr/share/mkinitfs/files, /etc/mkinitfs/files
|
|
||||||
## /usr/share/mkinitfs/files-extra, /etc/mkinitfs/files-extra
|
|
||||||
|
|
||||||
Files with the *.files* extension are read as a list of
|
|
||||||
files/directories. Each line is in the format:
|
|
||||||
|
|
||||||
```
|
|
||||||
<source path>:<destination path>
|
|
||||||
```
|
|
||||||
|
|
||||||
The source path is the location, at runtime, of the file or directory
|
|
||||||
which will be copied to the destination path within the initramfs
|
|
||||||
archive. Specifying a destination path, with *:<destination path>* is
|
|
||||||
optional. If it is omitted, then the source path will be used as the
|
|
||||||
destination path within the archive. The source and destination paths
|
|
||||||
are delimited by a *:* (colon.) Destination path is ignored if the source
|
|
||||||
path is a glob that returns more than 1 file. This may change in the future.
|
|
||||||
|
|
||||||
[[ *Line in .files*
|
|
||||||
:< Comment
|
|
||||||
| */usr/share/bazz*
|
|
||||||
: File or directory */usr/share/bazz* would be added to the archive under */usr/share/bazz*
|
|
||||||
| */usr/share/bazz:/bazz*
|
|
||||||
: File or directory */usr/share/bazz* would be added to the archive under */bazz*
|
|
||||||
| */root/something/\**
|
|
||||||
: Everything under */root/something* would be added to the archive under */root/something*
|
|
||||||
| */etc/foo/\*/bazz:/foo*
|
|
||||||
: Anything that matches the glob will be installed under the source path in the archive. For example, */etc/foo/bar/bazz* would be installed at */etc/foo/bar/bazz* in the archive. The destination path is ignored.
|
|
||||||
|
|
||||||
It's possible to overwrite file/directory destinations from
|
|
||||||
configuration in */usr/share/mkinitfs* by specifying the same source
|
|
||||||
path(s) under the relevant directory in */etc/mkinitfs*, and changing
|
|
||||||
the destination path.
|
|
||||||
|
|
||||||
Any lines in these files that start with *#* are considered comments, and
|
|
||||||
skipped.
|
|
||||||
|
|
||||||
## /usr/share/mkinitfs/hooks, /etc/mkinitfs/hooks
|
|
||||||
## /usr/share/mkinitfs/hooks-extra*, /etc/mkinitfs/hooks-extra
|
|
||||||
|
|
||||||
Any files listed under these directories are copied as-is into the
|
|
||||||
relevant archives. Hooks are generally script files, but how they are
|
|
||||||
treated in the initramfs is entirely up to whatever init script is run
|
|
||||||
there on boot.
|
|
||||||
|
|
||||||
Hooks are installed in the initramfs under the */hooks* directory, and
|
|
||||||
under */hooks-extra* for the initramfs-extra.
|
|
||||||
|
|
||||||
## /usr/share/mkinitfs/modules, /etc/mkinitfs/modules
|
|
||||||
## /usr/share/mkinitfs/modules-extra, /etc/mkinitfs/modules-extra
|
|
||||||
|
|
||||||
Files with the *.modules* extention in these directories are lists of
|
|
||||||
kernel modules to include in the initramfs. Individual modules and
|
|
||||||
directories can be listed in the files here. Globbing is also supported.
|
|
||||||
|
|
||||||
Modules are installed in the initramfs archive under the same path they
|
|
||||||
exist on the system where mkinitfs is executed.
|
|
||||||
|
|
||||||
Any lines in these files that start with *#* are considered comments, and
|
|
||||||
skipped.
|
|
||||||
|
|
||||||
## /usr/share/mkinitfs/dirs, /etc/mkinitfs/dirs
|
|
||||||
|
|
||||||
Files with the *.dirs* extension in these directories are lists of
|
|
||||||
directories to create within the initramfs. There is no *-extra* variant,
|
|
||||||
since directories are of negligible size.
|
|
||||||
|
|
||||||
Any lines in these files that start with *#* are considered comments, and
|
|
||||||
skipped.
|
|
||||||
|
|
||||||
# BOOT-DEPLOY
|
|
||||||
|
|
||||||
After generating archives, mkinitfs will execute *boot-deploy*, using *$PATH* to
|
|
||||||
search for the app. The following commandline options are passed to it:
|
|
||||||
|
|
||||||
*-i* <initramfs filename>
|
|
||||||
|
|
||||||
Currently this is hardcoded to be "initramfs"
|
|
||||||
|
|
||||||
*-k* <kernel filename>
|
|
||||||
|
|
||||||
*-d* <work directory>
|
|
||||||
|
|
||||||
Path to the directory containing the build artifacts from mkinitfs.
|
|
||||||
|
|
||||||
*-o* <destination directory>
|
|
||||||
|
|
||||||
Path to the directory that boot-deploy should use as its root when
|
|
||||||
installing files.
|
|
||||||
|
|
||||||
*initramfs-extra*
|
|
||||||
|
|
||||||
This string is the filename of the initramfs-extra archive.
|
|
||||||
|
|
||||||
# AUTHORS
|
|
||||||
|
|
||||||
*Clayton Craft* <clayton@craftyguy.net>
|
|
9
go.mod
9
go.mod
@@ -1,11 +1,12 @@
|
|||||||
module gitlab.com/postmarketOS/postmarketos-mkinitfs
|
module gitlab.com/postmarketOS/postmarketos-mkinitfs
|
||||||
|
|
||||||
go 1.20
|
go 1.16
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
git.sr.ht/~sircmpwn/getopt v0.0.0-20201218204720-9961a9c6298f
|
||||||
|
github.com/BurntSushi/toml v0.4.0
|
||||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
|
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
|
||||||
github.com/klauspost/compress v1.15.12
|
github.com/klauspost/compress v1.13.3 // indirect
|
||||||
github.com/pierrec/lz4/v4 v4.1.17
|
github.com/klauspost/pgzip v1.2.5
|
||||||
github.com/ulikunitz/xz v0.5.10
|
|
||||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
|
||||||
)
|
)
|
||||||
|
32
go.sum
32
go.sum
@@ -1,10 +1,30 @@
|
|||||||
|
git.sr.ht/~sircmpwn/getopt v0.0.0-20201218204720-9961a9c6298f h1:f5axCdaRzGDCihN3o1Lq0ydn0VlkhY+11G0JOyY5qss=
|
||||||
|
git.sr.ht/~sircmpwn/getopt v0.0.0-20201218204720-9961a9c6298f/go.mod h1:wMEGFFFNuPos7vHmWXfszqImLppbc0wEhh6JBfJIUgw=
|
||||||
|
github.com/BurntSushi/toml v0.3.2-0.20210614224209-34d990aa228d/go.mod h1:2QZjSXA5e+XyFeCAxxtL8Z4StYUsTquL8ODGPR3C3MA=
|
||||||
|
github.com/BurntSushi/toml v0.3.2-0.20210621044154-20a94d639b8e/go.mod h1:t4zg8TkHfP16Vb3x4WKIw7zVYMit5QFtPEO8lOWxzTg=
|
||||||
|
github.com/BurntSushi/toml v0.3.2-0.20210624061728-01bfc69d1057/go.mod h1:NMj2lD5LfMqcE0w8tnqOsH6944oaqpI1974lrIwerfE=
|
||||||
|
github.com/BurntSushi/toml v0.3.2-0.20210704081116-ccff24ee4463/go.mod h1:EkRrMiQQmfxK6kIldz3QbPlhmVkrjW1RDJUnbDqGYvc=
|
||||||
|
github.com/BurntSushi/toml v0.4.0 h1:qD/r9AL67srjW6O3fcSKZDsXqzBNX6ieSRywr2hRrdE=
|
||||||
|
github.com/BurntSushi/toml v0.4.0/go.mod h1:wtejDu7Q0FhCWAo2aXkywSJyYFg01EDTKozLNCz2JBA=
|
||||||
|
github.com/BurntSushi/toml-test v0.1.1-0.20210620192437-de01089bbf76/go.mod h1:P/PrhmZ37t5llHfDuiouWXtFgqOoQ12SAh9j6EjrBR4=
|
||||||
|
github.com/BurntSushi/toml-test v0.1.1-0.20210624055653-1f6389604dc6/go.mod h1:UAIt+Eo8itMZAAgImXkPGDMYsT1SsJkVdB5TuONl86A=
|
||||||
|
github.com/BurntSushi/toml-test v0.1.1-0.20210704062846-269931e74e3f/go.mod h1:fnFWrIwqgHsEjVsW3RYCJmDo86oq9eiJ9u6bnqhtm2g=
|
||||||
|
github.com/BurntSushi/toml-test v0.1.1-0.20210723065233-facb9eccd4da h1:2QGUaQtV2u8V1USTI883wo+uxtZFAiZ4TCNupHJ98IU=
|
||||||
|
github.com/BurntSushi/toml-test v0.1.1-0.20210723065233-facb9eccd4da/go.mod h1:ve9Q/RRu2vHi42LocPLNvagxuUJh993/95b18bw/Nws=
|
||||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=
|
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=
|
||||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
|
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
|
||||||
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
|
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||||
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
|
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
github.com/klauspost/compress v1.13.3 h1:BtAvtV1+h0YwSVwWoYXMREPpYu9VzTJ9QDI1TEg/iQQ=
|
||||||
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
|
github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||||
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
|
||||||
|
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
||||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
zgo.at/zli v0.0.0-20210619044753-e7020a328e59/go.mod h1:HLAc12TjNGT+VRXr76JnsNE3pbooQtwKWhX+RlDjQ2Y=
|
||||||
|
@@ -1,466 +0,0 @@
|
|||||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/cavaliercoder/go-cpio"
|
|
||||||
"github.com/klauspost/compress/zstd"
|
|
||||||
"github.com/pierrec/lz4/v4"
|
|
||||||
"github.com/ulikunitz/xz"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
type CompressFormat string
|
|
||||||
|
|
||||||
const (
|
|
||||||
FormatGzip CompressFormat = "gzip"
|
|
||||||
FormatLzma CompressFormat = "lzma"
|
|
||||||
FormatLz4 CompressFormat = "lz4"
|
|
||||||
FormatZstd CompressFormat = "zstd"
|
|
||||||
FormatNone CompressFormat = "none"
|
|
||||||
)
|
|
||||||
|
|
||||||
type CompressLevel string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Mapped to the "default" level for the given format
|
|
||||||
LevelDefault CompressLevel = "default"
|
|
||||||
// Maps to the fastest compression level for the given format
|
|
||||||
LevelFast CompressLevel = "fast"
|
|
||||||
// Maps to the best compression level for the given format
|
|
||||||
LevelBest CompressLevel = "best"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Archive struct {
|
|
||||||
cpioWriter *cpio.Writer
|
|
||||||
buf *bytes.Buffer
|
|
||||||
compress_format CompressFormat
|
|
||||||
compress_level CompressLevel
|
|
||||||
items archiveItems
|
|
||||||
}
|
|
||||||
|
|
||||||
func New(format CompressFormat, level CompressLevel) *Archive {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
archive := &Archive{
|
|
||||||
cpioWriter: cpio.NewWriter(buf),
|
|
||||||
buf: buf,
|
|
||||||
compress_format: format,
|
|
||||||
compress_level: level,
|
|
||||||
}
|
|
||||||
|
|
||||||
return archive
|
|
||||||
}
|
|
||||||
|
|
||||||
type archiveItem struct {
|
|
||||||
header *cpio.Header
|
|
||||||
sourcePath string
|
|
||||||
}
|
|
||||||
|
|
||||||
type archiveItems struct {
|
|
||||||
items []archiveItem
|
|
||||||
sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractFormatLevel parses the given string in the format format[:level],
|
|
||||||
// where :level is one of CompressLevel consts. If level is omitted from the
|
|
||||||
// string, or if it can't be parsed, the level is set to the default level for
|
|
||||||
// the given format. If format is unknown, gzip is selected. This function is
|
|
||||||
// designed to always return something usable within this package.
|
|
||||||
func ExtractFormatLevel(s string) (format CompressFormat, level CompressLevel) {
|
|
||||||
|
|
||||||
f, l, found := strings.Cut(s, ":")
|
|
||||||
if !found {
|
|
||||||
l = "default"
|
|
||||||
}
|
|
||||||
|
|
||||||
level = CompressLevel(strings.ToLower(l))
|
|
||||||
format = CompressFormat(strings.ToLower(f))
|
|
||||||
switch level {
|
|
||||||
|
|
||||||
}
|
|
||||||
switch level {
|
|
||||||
case LevelBest:
|
|
||||||
case LevelDefault:
|
|
||||||
case LevelFast:
|
|
||||||
default:
|
|
||||||
log.Print("Unknown or no compression level set, using default")
|
|
||||||
level = LevelDefault
|
|
||||||
}
|
|
||||||
|
|
||||||
switch format {
|
|
||||||
case FormatGzip:
|
|
||||||
case FormatLzma:
|
|
||||||
log.Println("Format lzma doesn't support a compression level, using default settings")
|
|
||||||
level = LevelDefault
|
|
||||||
case FormatLz4:
|
|
||||||
case FormatNone:
|
|
||||||
case FormatZstd:
|
|
||||||
default:
|
|
||||||
log.Print("Unknown or no compression format set, using gzip")
|
|
||||||
format = FormatGzip
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adds the given item to the archiveItems, only if it doesn't already exist in
|
|
||||||
// the list. The items are kept sorted in ascending order.
|
|
||||||
func (a *archiveItems) add(item archiveItem) {
|
|
||||||
a.Lock()
|
|
||||||
defer a.Unlock()
|
|
||||||
|
|
||||||
if len(a.items) < 1 {
|
|
||||||
// empty list
|
|
||||||
a.items = append(a.items, item)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// find existing item, or index of where new item should go
|
|
||||||
i := sort.Search(len(a.items), func(i int) bool {
|
|
||||||
return strings.Compare(item.header.Name, a.items[i].header.Name) <= 0
|
|
||||||
})
|
|
||||||
|
|
||||||
if i >= len(a.items) {
|
|
||||||
// doesn't exist in list, but would be at the very end
|
|
||||||
a.items = append(a.items, item)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Compare(a.items[i].header.Name, item.header.Name) == 0 {
|
|
||||||
// already in list
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// grow list by 1, shift right at index, and insert new string at index
|
|
||||||
a.items = append(a.items, archiveItem{})
|
|
||||||
copy(a.items[i+1:], a.items[i:])
|
|
||||||
a.items[i] = item
|
|
||||||
}
|
|
||||||
|
|
||||||
// IterItems returns a channel that yields every archiveItem in sorted order.
// The sending goroutine holds the read lock until the channel is closed, so
// consumers must drain the channel completely or the lock is never released.
func (a *archiveItems) IterItems() <-chan archiveItem {
	ch := make(chan archiveItem)
	go func() {
		a.RLock()
		defer a.RUnlock()

		for _, item := range a.items {
			ch <- item
		}
		close(ch)
	}()
	return ch
}
|
|
||||||
|
|
||||||
// Write finalizes the in-memory cpio stream and writes it, compressed with
// the archive's configured format/level, to path with the given file mode.
func (archive *Archive) Write(path string, mode os.FileMode) error {
	// Flush all queued items into the in-memory cpio buffer.
	if err := archive.writeCpio(); err != nil {
		return err
	}

	if err := archive.cpioWriter.Close(); err != nil {
		return fmt.Errorf("archive.Write: error closing archive: %w", err)
	}

	// Write archive to path
	if err := archive.writeCompressed(path, mode); err != nil {
		return fmt.Errorf("unable to write archive to location %q: %w", path, err)
	}

	// NOTE(review): writeCompressed already chmods the output to mode, so
	// this second chmod appears redundant (harmless) — confirm before removing.
	if err := os.Chmod(path, mode); err != nil {
		return fmt.Errorf("unable to chmod %q to %s: %w", path, mode, err)
	}

	return nil
}
|
|
||||||
|
|
||||||
// AddItems adds every file produced by the given FileLister to the archive,
// by calling AddItem on each (Source, Dest) pair the lister yields.
// (Earlier versions took a {source: dest} map; the old comment was stale.)
func (archive *Archive) AddItems(flister filelist.FileLister) error {
	list, err := flister.List()
	if err != nil {
		return err
	}
	for i := range list.IterItems() {
		if err := archive.AddItem(i.Source, i.Dest); err != nil {
			return err
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// AddItemsExclude is like AddItems, but takes a second FileLister that lists
|
|
||||||
// items that should not be added to the archive from the first FileLister
|
|
||||||
func (archive *Archive) AddItemsExclude(flister filelist.FileLister, exclude filelist.FileLister) error {
|
|
||||||
list, err := flister.List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
excludeList, err := exclude.List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range list.IterItems() {
|
|
||||||
dest, found := excludeList.Get(i.Source)
|
|
||||||
|
|
||||||
if found {
|
|
||||||
if i.Dest != dest {
|
|
||||||
found = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !found {
|
|
||||||
if err := archive.AddItem(i.Source, i.Dest); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adds the given file or directory at "source" to the archive at "dest"
|
|
||||||
func (archive *Archive) AddItem(source string, dest string) error {
|
|
||||||
|
|
||||||
sourceStat, err := os.Lstat(source)
|
|
||||||
if err != nil {
|
|
||||||
e, ok := err.(*os.PathError)
|
|
||||||
if e.Err == syscall.ENOENT && ok {
|
|
||||||
// doesn't exist in current filesystem, assume it's a new directory
|
|
||||||
return archive.addDir(dest)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if sourceStat.Mode()&os.ModeDir != 0 {
|
|
||||||
return archive.addDir(dest)
|
|
||||||
}
|
|
||||||
|
|
||||||
return archive.addFile(source, dest)
|
|
||||||
}
|
|
||||||
|
|
||||||
// addFile queues the file at source for inclusion in the archive at dest.
// Parent directories of dest are queued first. Symlinks are stored as
// symlink entries and then the link target itself is recursively added
// (at its own path), so the archive is self-contained.
func (archive *Archive) addFile(source string, dest string) error {
	// Make sure every ancestor directory of dest exists in the archive.
	if err := archive.addDir(filepath.Dir(dest)); err != nil {
		return err
	}

	sourceStat, err := os.Lstat(source)
	if err != nil {
		log.Print("addFile: failed to stat file: ", source)
		return err
	}

	// Symlink: write symlink to archive then set 'file' to link target
	if sourceStat.Mode()&os.ModeSymlink != 0 {
		// log.Printf("File %q is a symlink", file)
		target, err := os.Readlink(source)
		if err != nil {
			log.Print("addFile: failed to get symlink target: ", source)
			return err
		}

		// cpio names are relative (no leading slash).
		destFilename := strings.TrimPrefix(dest, "/")

		archive.items.add(archiveItem{
			sourcePath: source,
			header: &cpio.Header{
				Name:     destFilename,
				Linkname: target,
				// NOTE(review): symlink mode is hard-coded to 0644 rather
				// than taken from the source — confirm this is intentional.
				Mode: 0644 | cpio.ModeSymlink,
				Size: int64(len(target)),
				// Checksum: 1,
			},
		})

		// A bare filename target ("foo") is resolved relative to the
		// symlink's own directory before recursing.
		if filepath.Dir(target) == "." {
			target = filepath.Join(filepath.Dir(source), target)
		}
		// make sure target is an absolute path
		if !filepath.IsAbs(target) {
			target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
			if err != nil {
				return err
			}
		}
		// Recurse so the link target's contents end up in the archive too.
		err = archive.addFile(target, target)
		return err
	}

	destFilename := strings.TrimPrefix(dest, "/")

	// Regular file: record header now; contents are streamed in writeCpio.
	archive.items.add(archiveItem{
		sourcePath: source,
		header: &cpio.Header{
			Name: destFilename,
			Mode: cpio.FileMode(sourceStat.Mode().Perm()),
			Size: sourceStat.Size(),
			// Checksum: 1,
		},
	})

	return nil
}
|
|
||||||
|
|
||||||
func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err error) {
|
|
||||||
|
|
||||||
var compressor io.WriteCloser
|
|
||||||
defer func() {
|
|
||||||
e := compressor.Close()
|
|
||||||
if e != nil && err == nil {
|
|
||||||
err = e
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
fd, err := os.Create(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Note: fd.Close omitted since it'll be closed in "compressor"
|
|
||||||
|
|
||||||
switch archive.compress_format {
|
|
||||||
case FormatGzip:
|
|
||||||
level := gzip.DefaultCompression
|
|
||||||
switch archive.compress_level {
|
|
||||||
case LevelBest:
|
|
||||||
level = gzip.BestCompression
|
|
||||||
case LevelFast:
|
|
||||||
level = gzip.BestSpeed
|
|
||||||
}
|
|
||||||
compressor, err = gzip.NewWriterLevel(fd, level)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case FormatLzma:
|
|
||||||
compressor, err = xz.NewWriter(fd)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case FormatLz4:
|
|
||||||
// The default compression for the lz4 library is Fast, and
|
|
||||||
// they don't define a Default level otherwise
|
|
||||||
level := lz4.Fast
|
|
||||||
switch archive.compress_level {
|
|
||||||
case LevelBest:
|
|
||||||
level = lz4.Level9
|
|
||||||
case LevelFast:
|
|
||||||
level = lz4.Fast
|
|
||||||
}
|
|
||||||
|
|
||||||
var writer = lz4.NewWriter(fd)
|
|
||||||
err = writer.Apply(lz4.LegacyOption(true), lz4.CompressionLevelOption(level))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
compressor = writer
|
|
||||||
case FormatNone:
|
|
||||||
compressor = fd
|
|
||||||
case FormatZstd:
|
|
||||||
level := zstd.SpeedDefault
|
|
||||||
switch archive.compress_level {
|
|
||||||
case LevelBest:
|
|
||||||
level = zstd.SpeedBestCompression
|
|
||||||
case LevelFast:
|
|
||||||
level = zstd.SpeedFastest
|
|
||||||
}
|
|
||||||
compressor, err = zstd.NewWriter(fd, zstd.WithEncoderLevel(level))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
log.Print("Unknown or no compression format set, using gzip")
|
|
||||||
compressor = gzip.NewWriter(fd)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = io.Copy(compressor, archive.buf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// call fsync just to be sure
|
|
||||||
if err := fd.Sync(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Chmod(path, mode); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (archive *Archive) writeCpio() error {
|
|
||||||
// having a transient function for actually adding files to the archive
|
|
||||||
// allows the deferred fd.close to run after every copy and prevent having
|
|
||||||
// tons of open file handles until the copying is all done
|
|
||||||
copyToArchive := func(source string, header *cpio.Header) error {
|
|
||||||
|
|
||||||
if err := archive.cpioWriter.WriteHeader(header); err != nil {
|
|
||||||
return fmt.Errorf("archive.writeCpio: unable to write header: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// don't copy actual dirs into the archive, writing the header is enough
|
|
||||||
if !header.Mode.IsDir() {
|
|
||||||
if header.Mode.IsRegular() {
|
|
||||||
fd, err := os.Open(source)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
|
|
||||||
}
|
|
||||||
defer fd.Close()
|
|
||||||
if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
|
|
||||||
return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
|
|
||||||
}
|
|
||||||
} else if header.Linkname != "" {
|
|
||||||
// the contents of a symlink is just need the link name
|
|
||||||
if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
|
|
||||||
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range archive.items.IterItems() {
|
|
||||||
if err := copyToArchive(i.sourcePath, i.header); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (archive *Archive) addDir(dir string) error {
|
|
||||||
if dir == "/" {
|
|
||||||
dir = "."
|
|
||||||
}
|
|
||||||
|
|
||||||
subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
|
|
||||||
for i, subdir := range subdirs {
|
|
||||||
path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
|
|
||||||
archive.items.add(archiveItem{
|
|
||||||
sourcePath: path,
|
|
||||||
header: &cpio.Header{
|
|
||||||
Name: path,
|
|
||||||
Mode: cpio.ModeDir | 0755,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
@@ -1,278 +0,0 @@
|
|||||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/cavaliercoder/go-cpio"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestArchiveItemsAdd exercises archiveItems.add: the list must stay sorted
// ascending by header name and duplicate names must not be inserted.
func TestArchiveItemsAdd(t *testing.T) {
	subtests := []struct {
		name     string
		inItems  []archiveItem // list contents before the call
		inItem   archiveItem   // item passed to add
		expected []archiveItem // list contents after the call
	}{
		{
			// insertion into an empty list
			name:    "empty list",
			inItems: []archiveItem{},
			inItem: archiveItem{
				sourcePath: "/foo/bar",
				header:     &cpio.Header{Name: "/foo/bar"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			// adding a duplicate must leave the list unchanged
			name: "already exists",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/foo",
				header:     &cpio.Header{Name: "/foo"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			// insertion in the middle of an existing list
			name: "add new",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
				{
					sourcePath: "/foo/bar1",
					header:     &cpio.Header{Name: "/foo/bar1"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/foo/bar0",
				header:     &cpio.Header{Name: "/foo/bar0"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
				{
					sourcePath: "/foo/bar0",
					header:     &cpio.Header{Name: "/foo/bar0"},
				},
				{
					sourcePath: "/foo/bar1",
					header:     &cpio.Header{Name: "/foo/bar1"},
				},
			},
		},
		{
			// new item sorts before everything present
			name: "add new at beginning",
			inItems: []archiveItem{
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/bazz/bar",
				header:     &cpio.Header{Name: "/bazz/bar"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/foo/bar",
					header:     &cpio.Header{Name: "/foo/bar"},
				},
			},
		},
		{
			// new item sorts after everything present
			name: "add new at end",
			inItems: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
			},
			inItem: archiveItem{
				sourcePath: "/zzz/bazz",
				header:     &cpio.Header{Name: "/zzz/bazz"},
			},
			expected: []archiveItem{
				{
					sourcePath: "/bazz/bar",
					header:     &cpio.Header{Name: "/bazz/bar"},
				},
				{
					sourcePath: "/foo",
					header:     &cpio.Header{Name: "/foo"},
				},
				{
					sourcePath: "/zzz/bazz",
					header:     &cpio.Header{Name: "/zzz/bazz"},
				},
			},
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			a := archiveItems{items: st.inItems}
			a.add(st.inItem)
			if !reflect.DeepEqual(st.expected, a.items) {
				t.Fatal("expected:", st.expected, " got: ", a.items)
			}
		})
	}
}
|
|
||||||
|
|
||||||
// TestExtractFormatLevel checks parsing and fallback behavior of
// ExtractFormatLevel for valid, partial, and malformed "format[:level]"
// inputs. Unknown formats fall back to gzip; unknown or unsupported levels
// fall back to the default level.
func TestExtractFormatLevel(t *testing.T) {
	tests := []struct {
		name           string
		in             string // raw "format[:level]" input
		expectedFormat CompressFormat
		expectedLevel  CompressLevel
	}{
		{
			name:           "gzip, default level",
			in:             "gzip:default",
			expectedFormat: FormatGzip,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "unknown format, level 12",
			in:             "pear:12",
			expectedFormat: FormatGzip,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "zstd, level not given",
			in:             "zstd",
			expectedFormat: FormatZstd,
			expectedLevel:  LevelDefault,
		},
		{
			// strings.Cut splits on the first ':', so "fast:" is an
			// unrecognized level and falls back to the default
			name:           "zstd, invalid level 'fast:'",
			in:             "zstd:fast:",
			expectedFormat: FormatZstd,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "zstd, best",
			in:             "zstd:best",
			expectedFormat: FormatZstd,
			expectedLevel:  LevelBest,
		},
		{
			name:           "zstd, level empty :",
			in:             "zstd:",
			expectedFormat: FormatZstd,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "gzip, best",
			in:             "gzip:best",
			expectedFormat: FormatGzip,
			expectedLevel:  LevelBest,
		},
		{
			name:           "<empty>, <empty>",
			in:             "",
			expectedFormat: FormatGzip,
			expectedLevel:  LevelDefault,
		},
		{
			// lzma ignores the requested level and always uses default
			name:           "lzma, fast",
			in:             "lzma:fast",
			expectedFormat: FormatLzma,
			expectedLevel:  LevelDefault,
		},
		{
			name:           "lz4, fast",
			in:             "lz4:fast",
			expectedFormat: FormatLz4,
			expectedLevel:  LevelFast,
		},
		{
			name:           "none",
			in:             "none",
			expectedFormat: FormatNone,
			expectedLevel:  LevelDefault,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			format, level := ExtractFormatLevel(test.in)
			if format != test.expectedFormat {
				t.Fatal("format expected: ", test.expectedFormat, " got: ", format)
			}
			if level != test.expectedLevel {
				t.Fatal("level expected: ", test.expectedLevel, " got: ", level)
			}

		})
	}
}
|
|
@@ -1,166 +0,0 @@
|
|||||||
package bootdeploy
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BootDeploy holds the parameters needed to invoke the external boot-deploy
// tool (see Run).
type BootDeploy struct {
	inDir   string // directory holding the generated initramfs files
	outDir  string // directory boot-deploy writes its output into
	devinfo deviceinfo.DeviceInfo
}
|
|
||||||
|
|
||||||
// New returns a new BootDeploy, which then runs:
|
|
||||||
//
|
|
||||||
// boot-deploy -d indir -o outDir
|
|
||||||
//
|
|
||||||
// devinfo is used to access some deviceinfo values, such as UbootBoardname
|
|
||||||
// and GenerateSystemdBoot
|
|
||||||
func New(inDir string, outDir string, devinfo deviceinfo.DeviceInfo) *BootDeploy {
|
|
||||||
return &BootDeploy{
|
|
||||||
inDir: inDir,
|
|
||||||
outDir: outDir,
|
|
||||||
devinfo: devinfo,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *BootDeploy) Run() error {
|
|
||||||
if err := copyUbootFiles(b.inDir, b.devinfo.UbootBoardname); errors.Is(err, os.ErrNotExist) {
|
|
||||||
log.Println("u-boot files copying skipped: ", err)
|
|
||||||
} else {
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal("copyUbootFiles: ", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
kernels, err := getKernelPath(b.outDir, b.devinfo.GenerateSystemdBoot == "true")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pick a kernel that does not have suffixes added by boot-deploy
|
|
||||||
var kernFile string
|
|
||||||
for _, f := range kernels {
|
|
||||||
if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
kernFile = f
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
kernFd, err := os.Open(kernFile)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer kernFd.Close()
|
|
||||||
|
|
||||||
kernFilename := path.Base(kernFile)
|
|
||||||
kernFileCopy, err := os.Create(filepath.Join(b.inDir, kernFilename))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := kernFileCopy.Close(); err != nil {
|
|
||||||
return fmt.Errorf("error closing %s: %w", kernFilename, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
|
|
||||||
cmd := exec.Command("boot-deploy",
|
|
||||||
"-i", "initramfs",
|
|
||||||
"-k", kernFilename,
|
|
||||||
"-d", b.inDir,
|
|
||||||
"-o", b.outDir,
|
|
||||||
"initramfs-extra")
|
|
||||||
|
|
||||||
cmd.Stdout = os.Stdout
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getKernelPath returns candidate kernel image paths found in outDir. When
// zboot is true, a systemd-boot "linux.efi" image is preferred; if none is
// found (or zboot is false), vmlinuz* images are returned. An error is
// returned only when nothing matches at all.
func getKernelPath(outDir string, zboot bool) ([]string, error) {
	if zboot {
		if efi, _ := filepath.Glob(filepath.Join(outDir, "linux.efi")); len(efi) > 0 {
			return efi, nil
		}
		// else fallback to vmlinuz* below
	}

	kernFile := "vmlinuz*"
	pattern := filepath.Join(outDir, kernFile)
	kernels, _ := filepath.Glob(pattern)
	if len(kernels) == 0 {
		return nil, errors.New("Unable to find any kernels at " + pattern)
	}

	return kernels, nil
}
|
|
||||||
|
|
||||||
// copy copies the file at srcFile path to a new file at dstFile path,
// truncating dstFile if it already exists. Errors from closing the
// destination are reported if no earlier error occurred.
func copy(srcFile, dstFile string) (err error) {
	out, err := os.Create(dstFile)
	if err != nil {
		return err
	}

	// BUGFIX: folding the Close error into the result only works with a
	// named return value; previously `err` was a plain local, so the
	// assignment inside the defer was silently discarded.
	defer func() {
		errClose := out.Close()
		if err == nil {
			err = errClose
		}
	}()

	in, err := os.Open(srcFile)
	if err != nil {
		return err
	}
	defer in.Close()

	_, err = io.Copy(out, in)
	if err != nil {
		return err
	}

	return nil
}
|
|
||||||
|
|
||||||
// copyUbootFiles uses deviceinfo_uboot_boardname to copy u-boot files required
|
|
||||||
// for running boot-deploy
|
|
||||||
func copyUbootFiles(path, ubootBoardname string) error {
|
|
||||||
if ubootBoardname == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
srcDir := filepath.Join("/usr/share/u-boot", ubootBoardname)
|
|
||||||
entries, err := os.ReadDir(srcDir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, entry := range entries {
|
|
||||||
sourcePath := filepath.Join(srcDir, entry.Name())
|
|
||||||
destPath := filepath.Join(path, entry.Name())
|
|
||||||
|
|
||||||
if err := copy(sourcePath, destPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
@@ -1,65 +0,0 @@
|
|||||||
package filelist
|
|
||||||
|
|
||||||
import "sync"
|
|
||||||
|
|
||||||
// FileLister is implemented by anything that can produce a FileList.
type FileLister interface {
	List() (*FileList, error)
}

// File is a single source → destination path mapping.
type File struct {
	Source string
	Dest   string
}

// FileList is a concurrency-safe mapping of source paths to destination
// paths.
type FileList struct {
	m map[string]string
	sync.RWMutex
}

// NewFileList returns an empty, ready-to-use FileList.
func NewFileList() *FileList {
	return &FileList{m: map[string]string{}}
}

// Add records dest as the destination for src, replacing any earlier entry.
func (f *FileList) Add(src string, dest string) {
	f.Lock()
	f.m[src] = dest
	f.Unlock()
}

// Get returns the destination recorded for src and whether one exists.
func (f *FileList) Get(src string) (string, bool) {
	f.RLock()
	defer f.RUnlock()

	d, ok := f.m[src]
	return d, ok
}

// Import copies in the contents of src. If a source path already exists when
// importing, then the destination path is updated with the new value.
func (f *FileList) Import(src *FileList) {
	for item := range src.IterItems() {
		f.Add(item.Source, item.Dest)
	}
}

// IterItems returns a channel yielding each entry as a File. The read lock
// is held by the sending goroutine until the channel is drained, so callers
// must consume it fully.
func (f *FileList) IterItems() <-chan File {
	ch := make(chan File)
	go func() {
		f.RLock()
		defer f.RUnlock()

		for src, dest := range f.m {
			ch <- File{Source: src, Dest: dest}
		}
		close(ch)
	}()
	return ch
}
|
|
@@ -1,56 +0,0 @@
|
|||||||
package hookdirs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HookDirs lists directories to create in the initramfs, read from hook
// files stored under a configured path.
type HookDirs struct {
	path string
}

// New returns a new HookDirs that will use the given path to provide a list
// of directories to use.
func New(path string) *HookDirs {
	h := HookDirs{path: path}
	return &h
}
|
|
||||||
|
|
||||||
func (h *HookDirs) List() (*filelist.FileList, error) {
|
|
||||||
log.Printf("- Searching for directories specified in %s", h.path)
|
|
||||||
|
|
||||||
files := filelist.NewFileList()
|
|
||||||
fileInfo, err := os.ReadDir(h.path)
|
|
||||||
if err != nil {
|
|
||||||
log.Println("-- Unable to find dir, skipping...")
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
for _, file := range fileInfo {
|
|
||||||
path := filepath.Join(h.path, file.Name())
|
|
||||||
f, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("getHookDirs: unable to open hook file: %w", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
log.Printf("-- Creating directories from: %s\n", path)
|
|
||||||
|
|
||||||
s := bufio.NewScanner(f)
|
|
||||||
for s.Scan() {
|
|
||||||
dir := s.Text()
|
|
||||||
if len(dir) == 0 || strings.HasPrefix(dir, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
files.Add(dir, dir)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
@@ -1,88 +0,0 @@
|
|||||||
package hookfiles
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HookFiles produces lists of files (and their binary dependencies) from
// hook files stored under a configured directory.
type HookFiles struct {
	filePath string
}

// New returns a new HookFiles that will use the given path to provide a list
// of files + any binary dependencies they might have.
func New(filePath string) *HookFiles {
	h := HookFiles{filePath: filePath}
	return &h
}
|
|
||||||
|
|
||||||
func (h *HookFiles) List() (*filelist.FileList, error) {
|
|
||||||
log.Printf("- Searching for file lists from %s", h.filePath)
|
|
||||||
|
|
||||||
files := filelist.NewFileList()
|
|
||||||
fileInfo, err := os.ReadDir(h.filePath)
|
|
||||||
if err != nil {
|
|
||||||
log.Println("-- Unable to find dir, skipping...")
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
for _, file := range fileInfo {
|
|
||||||
path := filepath.Join(h.filePath, file.Name())
|
|
||||||
f, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("getHookFiles: unable to open hook file: %w", err)
|
|
||||||
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
log.Printf("-- Including files from: %s\n", path)
|
|
||||||
|
|
||||||
if list, err := slurpFiles(f); err != nil {
|
|
||||||
return nil, fmt.Errorf("hookfiles: unable to process hook file %q: %w", path, err)
|
|
||||||
} else {
|
|
||||||
files.Import(list)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func slurpFiles(fd io.Reader) (*filelist.FileList, error) {
|
|
||||||
files := filelist.NewFileList()
|
|
||||||
|
|
||||||
s := bufio.NewScanner(fd)
|
|
||||||
for s.Scan() {
|
|
||||||
line := s.Text()
|
|
||||||
if len(line) == 0 || strings.HasPrefix(line, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
src, dest, has_dest := strings.Cut(line, ":")
|
|
||||||
|
|
||||||
fFiles, err := misc.GetFiles([]string{src}, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to add %q: %w", src, err)
|
|
||||||
}
|
|
||||||
// loop over all returned files from GetFile
|
|
||||||
for _, file := range fFiles {
|
|
||||||
if !has_dest {
|
|
||||||
files.Add(file, file)
|
|
||||||
} else if len(fFiles) > 1 {
|
|
||||||
// Don't support specifying dest if src was a glob
|
|
||||||
// NOTE: this could support this later...
|
|
||||||
files.Add(file, file)
|
|
||||||
} else {
|
|
||||||
// dest path specified, and only 1 file
|
|
||||||
files.Add(file, dest)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return files, s.Err()
|
|
||||||
}
|
|
@@ -1,42 +0,0 @@
|
|||||||
package hookscripts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HookScripts lists the hook scripts found in a host directory, assigning
// each one a destination inside the initramfs.
type HookScripts struct {
	// destPath is the initramfs directory the scripts are installed to.
	destPath string
	// scriptsDir is the host directory that is searched for scripts.
	scriptsDir string
}
|
|
||||||
|
|
||||||
// New returns a new HookScripts that will use the given path to provide a list
|
|
||||||
// of script files. The destination for each script it set to destPath, using
|
|
||||||
// the original file name.
|
|
||||||
func New(scriptsDir string, destPath string) *HookScripts {
|
|
||||||
return &HookScripts{
|
|
||||||
destPath: destPath,
|
|
||||||
scriptsDir: scriptsDir,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *HookScripts) List() (*filelist.FileList, error) {
|
|
||||||
log.Printf("- Searching for hook scripts from %s", h.scriptsDir)
|
|
||||||
|
|
||||||
files := filelist.NewFileList()
|
|
||||||
|
|
||||||
fileInfo, err := os.ReadDir(h.scriptsDir)
|
|
||||||
if err != nil {
|
|
||||||
log.Println("-- Unable to find dir, skipping...")
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
for _, file := range fileInfo {
|
|
||||||
path := filepath.Join(h.scriptsDir, file.Name())
|
|
||||||
log.Printf("-- Including script: %s\n", path)
|
|
||||||
files.Add(path, filepath.Join(h.destPath, file.Name()))
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
@@ -1,38 +0,0 @@
|
|||||||
package initramfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Initramfs allows building arbitrarily complex lists of features, by slurping
// up types that implement FileLister (which includes this type! yippee) and
// combining the output from them.
type Initramfs struct {
	// features are the FileListers whose output is combined by List.
	features []filelist.FileLister
	// files caches the combined list after the first successful List call.
	files *filelist.FileList
}
|
|
||||||
|
|
||||||
// New returns a new Initramfs that generate a list of files based on the given
|
|
||||||
// list of FileListers.
|
|
||||||
func New(features []filelist.FileLister) *Initramfs {
|
|
||||||
return &Initramfs{
|
|
||||||
features: features,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i *Initramfs) List() (*filelist.FileList, error) {
|
|
||||||
if i.files != nil {
|
|
||||||
return i.files, nil
|
|
||||||
}
|
|
||||||
i.files = filelist.NewFileList()
|
|
||||||
|
|
||||||
for _, f := range i.features {
|
|
||||||
list, err := f.List()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
i.files.Import(list)
|
|
||||||
}
|
|
||||||
|
|
||||||
return i.files, nil
|
|
||||||
}
|
|
@@ -1,212 +0,0 @@
|
|||||||
package modules
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Modules generates the list of kernel module files to include in the
// initramfs, driven by module list files found under modulesListPath.
type Modules struct {
	// modulesListPath is the directory containing module list files.
	modulesListPath string
}
|
|
||||||
|
|
||||||
// New returns a new Modules that will read in lists of kernel modules in the given path.
|
|
||||||
func New(modulesListPath string) *Modules {
|
|
||||||
return &Modules{
|
|
||||||
modulesListPath: modulesListPath,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Modules) List() (*filelist.FileList, error) {
|
|
||||||
kernVer, err := osutil.GetKernelVersion()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
files := filelist.NewFileList()
|
|
||||||
|
|
||||||
modDir := filepath.Join("/lib/modules", kernVer)
|
|
||||||
if exists, err := misc.Exists(modDir); !exists {
|
|
||||||
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
|
|
||||||
log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
|
|
||||||
return files, nil
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", modDir, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// modules.* required by modprobe
|
|
||||||
modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
|
|
||||||
for _, file := range modprobeFiles {
|
|
||||||
files.Add(file, file)
|
|
||||||
}
|
|
||||||
|
|
||||||
// slurp up modules from lists in modulesListPath
|
|
||||||
log.Printf("- Searching for kernel modules from %s", m.modulesListPath)
|
|
||||||
fileInfo, err := os.ReadDir(m.modulesListPath)
|
|
||||||
if err != nil {
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
for _, file := range fileInfo {
|
|
||||||
path := filepath.Join(m.modulesListPath, file.Name())
|
|
||||||
f, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to open module list file %q: %w", path, err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
log.Printf("-- Including modules from: %s\n", path)
|
|
||||||
|
|
||||||
if list, err := slurpModules(f, modDir); err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to process module list file %q: %w", path, err)
|
|
||||||
} else {
|
|
||||||
files.Import(list)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// slurpModules parses a module list read from fd and returns the selected
// kernel module files from modDir. Each non-empty, non-comment line is
// either a directory (has a trailing slash; glob patterns allowed, every
// module underneath is included) or a bare module name (resolved together
// with its dependencies via modules.dep). Lines containing both a directory
// part and a file part are unsupported and only logged.
func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
	files := filelist.NewFileList()
	s := bufio.NewScanner(fd)
	for s.Scan() {
		line := s.Text()
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		// a trailing slash leaves the file part empty: that marks a directory
		dir, file := filepath.Split(line)
		if file == "" {
			// item is a directory
			dir = filepath.Join(modDir, dir)
			dirs, _ := filepath.Glob(dir)
			for _, d := range dirs {
				if modFilelist, err := getModulesInDir(d); err != nil {
					return nil, fmt.Errorf("unable to get modules dir %q: %w", d, err)
				} else {
					for _, file := range modFilelist {
						files.Add(file, file)
					}
				}
			}
		} else if dir == "" {
			// item is a module name
			if modFilelist, err := getModule(s.Text(), modDir); err != nil {
				return nil, fmt.Errorf("unable to get module file %q: %w", s.Text(), err)
			} else {
				for _, file := range modFilelist {
					files.Add(file, file)
				}
			}
		} else {
			log.Printf("Unknown module entry: %q", line)
		}
	}

	return files, s.Err()
}
|
|
||||||
|
|
||||||
func getModulesInDir(modPath string) (files []string, err error) {
|
|
||||||
err = filepath.Walk(modPath, func(path string, _ os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
// Unable to walk path
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// this assumes module names are in the format <name>.ko[.format],
|
|
||||||
// where ".format" (e.g. ".gz") is optional.
|
|
||||||
if !strings.Contains(".ko", path) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
files = append(files, path)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
// file and all of its dependencies.
// Note: it's not necessarily fatal if the module is not found, since it may
// have been built into the kernel
func getModule(modName string, modDir string) (files []string, err error) {

	// modules.dep maps each module to its dependency list
	modDep := filepath.Join(modDir, "modules.dep")
	if exists, err := misc.Exists(modDep); !exists {
		return nil, fmt.Errorf("kernel module.dep not found: %s", modDir)
	} else if err != nil {
		return nil, fmt.Errorf("received unexpected error when getting module.dep status: %w", err)
	}

	fd, err := os.Open(modDep)
	if err != nil {
		return nil, fmt.Errorf("unable to open modules.dep: %w", err)
	}
	defer fd.Close()

	// deps holds the module's own path followed by its dependencies,
	// all relative to modDir
	deps, err := getModuleDeps(modName, fd)
	if err != nil {
		return nil, err
	}

	// verify each resolved path actually exists before including it
	for _, dep := range deps {
		p := filepath.Join(modDir, dep)
		if exists, err := misc.Exists(p); !exists {
			return nil, fmt.Errorf("tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p)
		} else if err != nil {
			return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", p, err)
		}

		files = append(files, p)
	}

	return
}
|
|
||||||
|
|
||||||
// Get the canonicalized name for the module as represented in the given modules.dep io.reader
|
|
||||||
func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
|
|
||||||
var deps []string
|
|
||||||
|
|
||||||
// split the module name on - and/or _, build a regex for matching
|
|
||||||
splitRe := regexp.MustCompile("[-_]+")
|
|
||||||
modNameReStr := splitRe.ReplaceAllString(modName, "[-_]+")
|
|
||||||
re := regexp.MustCompile("^" + modNameReStr + "$")
|
|
||||||
|
|
||||||
s := bufio.NewScanner(modulesDep)
|
|
||||||
for s.Scan() {
|
|
||||||
line := s.Text()
|
|
||||||
if len(line) == 0 || strings.HasPrefix(line, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
if len(fields) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fields[0] = strings.TrimSuffix(fields[0], ":")
|
|
||||||
|
|
||||||
found := re.FindAll([]byte(filepath.Base(stripExts(fields[0]))), -1)
|
|
||||||
if len(found) > 0 {
|
|
||||||
deps = append(deps, fields...)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
log.Print("Unable to get module + dependencies: ", modName)
|
|
||||||
return deps, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return deps, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// stripExts returns file with everything from the first '.' onward removed.
func stripExts(file string) string {
	base, _, _ := strings.Cut(file, ".")
	return base
}
|
|
@@ -1,82 +0,0 @@
|
|||||||
// Copyright 2023 Clayton Craft <clayton@craftyguy.net>
|
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
package modules
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestStripExts(t *testing.T) {
|
|
||||||
tables := []struct {
|
|
||||||
in string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{"/foo/bar/bazz.tar", "/foo/bar/bazz"},
|
|
||||||
{"file.tar.gz.xz.zip", "file"},
|
|
||||||
{"another_file", "another_file"},
|
|
||||||
{"a.b.c.d.e.f.g.h.i", "a"},
|
|
||||||
{"virtio_blk.ko", "virtio_blk"},
|
|
||||||
}
|
|
||||||
for _, table := range tables {
|
|
||||||
out := stripExts(table.in)
|
|
||||||
if out != table.expected {
|
|
||||||
t.Errorf("Expected: %q, got: %q", table.expected, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// testModuleDep is a sample modules.dep payload used by TestGetModuleDeps,
// covering entries with no dependencies, single and multiple dependencies,
// and both compressed (.ko.xz) and uncompressed (.ko) module files.
var testModuleDep string = `
kernel/sound/soc/codecs/snd-soc-msm8916-digital.ko:
kernel/net/sched/act_ipt.ko.xz: kernel/net/netfilter/x_tables.ko.xz
kernel/drivers/watchdog/watchdog.ko.xz:
kernel/drivers/usb/serial/ir-usb.ko.xz: kernel/drivers/usb/serial/usbserial.ko.xz
kernel/drivers/gpu/drm/scheduler/gpu-sched.ko.xz:
kernel/drivers/hid/hid-alps.ko.xz:
kernel/net/netfilter/xt_u32.ko.xz: kernel/net/netfilter/x_tables.ko.xz
kernel/net/netfilter/xt_sctp.ko.xz: kernel/net/netfilter/x_tables.ko.xz
kernel/drivers/hwmon/gl518sm.ko.xz:
kernel/drivers/watchdog/dw_wdt.ko.xz: kernel/drivers/watchdog/watchdog.ko.xz
kernel/net/bluetooth/hidp/hidp.ko.xz: kernel/net/bluetooth/bluetooth.ko.xz kernel/net/rfkill/rfkill.ko.xz kernel/crypto/ecdh_generic.ko.xz kernel/crypto/ecc.ko.xz
kernel/fs/nls/nls_iso8859-1.ko.xz:
kernel/net/vmw_vsock/vmw_vsock_virtio_transport.ko.xz: kernel/net/vmw_vsock/vmw_vsock_virtio_transport_common.ko.xz kernel/drivers/virtio/virtio.ko.xz kernel/drivers/virtio/virtio_ring.ko.xz kernel/net/vmw_vsock/vsock.ko.xz
kernel/drivers/gpu/drm/panfrost/panfrost.ko.xz: kernel/drivers/gpu/drm/scheduler/gpu-sched.ko.xz
kernel/drivers/gpu/drm/msm/msm.ko: kernel/drivers/gpu/drm/drm_kms_helper.ko
`
|
|
||||||
|
|
||||||
func TestGetModuleDeps(t *testing.T) {
|
|
||||||
tables := []struct {
|
|
||||||
in string
|
|
||||||
expected []string
|
|
||||||
}{
|
|
||||||
{"nls-iso8859-1", []string{"kernel/fs/nls/nls_iso8859-1.ko.xz"}},
|
|
||||||
{"gpu_sched", []string{"kernel/drivers/gpu/drm/scheduler/gpu-sched.ko.xz"}},
|
|
||||||
{"dw-wdt", []string{"kernel/drivers/watchdog/dw_wdt.ko.xz",
|
|
||||||
"kernel/drivers/watchdog/watchdog.ko.xz"}},
|
|
||||||
{"gl518sm", []string{"kernel/drivers/hwmon/gl518sm.ko.xz"}},
|
|
||||||
{"msm", []string{"kernel/drivers/gpu/drm/msm/msm.ko",
|
|
||||||
"kernel/drivers/gpu/drm/drm_kms_helper.ko"}},
|
|
||||||
}
|
|
||||||
for _, table := range tables {
|
|
||||||
out, err := getModuleDeps(table.in, strings.NewReader(testModuleDep))
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("unexpected error with input: %q, error: %q", table.expected, err)
|
|
||||||
}
|
|
||||||
if !stringSlicesEqual(out, table.expected) {
|
|
||||||
t.Errorf("Expected: %q, got: %q", table.expected, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringSlicesEqual reports whether a and b have the same length and equal
// elements in the same order.
func stringSlicesEqual(a []string, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
|
|
@@ -1,211 +0,0 @@
|
|||||||
package misc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"debug/elf"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"log"
|
|
||||||
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
func GetFiles(list []string, required bool) (files []string, err error) {
|
|
||||||
for _, file := range list {
|
|
||||||
filelist, err := getFile(file, required)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
files = append(files, filelist...)
|
|
||||||
}
|
|
||||||
|
|
||||||
files = RemoveDuplicates(files)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function doesn't handle globs, use getFile() instead.
|
|
||||||
func getFileNormalized(file string, required bool) (files []string, err error) {
|
|
||||||
fileInfo, err := os.Stat(file)
|
|
||||||
|
|
||||||
// Trying some fallbacks...
|
|
||||||
if err != nil {
|
|
||||||
type triedResult struct {
|
|
||||||
file string
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
triedFiles := make([]triedResult, 0, 1)
|
|
||||||
|
|
||||||
// Temporary fallback until alpine/pmOS usr-merge happened
|
|
||||||
// If a path starts with /bin or /sbin, also try /usr equivalent before giving up
|
|
||||||
if strings.HasPrefix(file, "/bin/") || strings.HasPrefix(file, "/sbin/") {
|
|
||||||
fileUsr := filepath.Join("/usr", file)
|
|
||||||
_, err := os.Stat(fileUsr);
|
|
||||||
if err == nil {
|
|
||||||
log.Printf("getFile: failed to find %q, but found it in %q. Please adjust the path.", file, fileUsr)
|
|
||||||
return getFileNormalized(fileUsr, required)
|
|
||||||
} else {
|
|
||||||
triedFiles = append(triedFiles, triedResult{fileUsr, err})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
// Check if there is a Zstd-compressed version of the file
|
|
||||||
fileZstd := file + ".zst" // .zst is the extension used by linux-firmware
|
|
||||||
_, err := os.Stat(fileZstd);
|
|
||||||
if err == nil {
|
|
||||||
return getFileNormalized(fileZstd, required)
|
|
||||||
} else {
|
|
||||||
triedFiles = append(triedFiles, triedResult{fileZstd, err})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Failed to find anything
|
|
||||||
if required {
|
|
||||||
failStrings := make([]string, 0, 2)
|
|
||||||
for _, result := range triedFiles {
|
|
||||||
failStrings = append(failStrings, fmt.Sprintf("\n - also tried %q: %v", result.file, result.err))
|
|
||||||
}
|
|
||||||
return files, fmt.Errorf("getFile: failed to stat file %q: %v%q", file, err, strings.Join(failStrings, ""))
|
|
||||||
} else {
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fileInfo.IsDir() {
|
|
||||||
// Recurse over directory contents
|
|
||||||
err := filepath.Walk(file, func(path string, f os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if f.IsDir() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
newFiles, err := getFile(path, required)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
files = append(files, newFiles...)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return files, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
files = append(files, file)
|
|
||||||
|
|
||||||
// get dependencies for binaries
|
|
||||||
if _, err := elf.Open(file); err == nil {
|
|
||||||
if binaryDepFiles, err := getBinaryDeps(file); err != nil {
|
|
||||||
return files, err
|
|
||||||
} else {
|
|
||||||
files = append(files, binaryDepFiles...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
files = RemoveDuplicates(files)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func getFile(file string, required bool) (files []string, err error) {
|
|
||||||
// Expand glob expression
|
|
||||||
expanded, err := filepath.Glob(file)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(expanded) > 0 && expanded[0] != file {
|
|
||||||
for _, path := range expanded {
|
|
||||||
if globFiles, err := getFile(path, required); err != nil {
|
|
||||||
return files, err
|
|
||||||
} else {
|
|
||||||
files = append(files, globFiles...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return RemoveDuplicates(files), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return getFileNormalized(file, required)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDeps recursively resolves the shared-library dependencies of the ELF
// binary at file. parents records files already visited, both to avoid
// duplicate work and to break dependency cycles. The returned list includes
// file itself; it is an error if any imported library cannot be located.
func getDeps(file string, parents map[string]struct{}) (files []string, err error) {

	// already visited: nothing to add
	if _, found := parents[file]; found {
		return
	}

	// get dependencies for binaries
	fd, err := elf.Open(file)
	if err != nil {
		return nil, fmt.Errorf("getDeps: unable to open elf binary %q: %w", file, err)
	}
	// ImportedLibraries error deliberately ignored: a binary without a
	// dynamic section simply yields no libs.
	libs, _ := fd.ImportedLibraries()
	fd.Close()
	files = append(files, file)
	parents[file] = struct{}{}

	if len(libs) == 0 {
		return
	}

	// we don't recursively search these paths for performance reasons
	libdirGlobs := []string{
		"/usr/lib",
		"/lib",
		"/usr/lib/expect*",
	}

	for _, lib := range libs {
		found := false
	findDepLoop:
		for _, libdirGlob := range libdirGlobs {
			libdirs, _ := filepath.Glob(libdirGlob)
			for _, libdir := range libdirs {
				path := filepath.Join(libdir, lib)
				if _, err := os.Stat(path); err == nil {
					// recurse into the found library, then record it
					binaryDepFiles, err := getDeps(path, parents)
					if err != nil {
						return nil, err
					}
					files = append(files, binaryDepFiles...)
					files = append(files, path)
					found = true
					break findDepLoop
				}
			}
		}
		if !found {
			return nil, fmt.Errorf("getDeps: unable to locate dependency for %q: %s", file, lib)
		}
	}

	return
}
|
|
||||||
|
|
||||||
// Recursively list all dependencies for a given ELF binary
func getBinaryDeps(file string) ([]string, error) {
	// if file is a symlink, resolve dependencies for target
	fileStat, err := os.Lstat(file)
	if err != nil {
		return nil, fmt.Errorf("getBinaryDeps: failed to stat file %q: %w", file, err)
	}

	// Symlink: write symlink to archive then set 'file' to link target
	if fileStat.Mode()&os.ModeSymlink != 0 {
		target, err := os.Readlink(file)
		if err != nil {
			return nil, fmt.Errorf("getBinaryDeps: unable to read symlink %q: %w", file, err)
		}
		if !filepath.IsAbs(target) {
			// resolve a relative link target against the symlink's directory
			target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
			if err != nil {
				return nil, err
			}
		}
		file = target
	}

	// walk the dependency graph starting from the (resolved) file
	return getDeps(file, make(map[string]struct{}))

}
|
|
@@ -1,65 +0,0 @@
|
|||||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
package misc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Merge copies every key/value pair from b into a, overwriting keys that
// already exist in a.
func Merge(a map[string]string, b map[string]string) {
	for key, value := range b {
		a[key] = value
	}
}
|
|
||||||
|
|
||||||
// RemoveDuplicates returns the unique values from in. The order of the
// result is unspecified, since it comes from map iteration.
func RemoveDuplicates(in []string) (out []string) {
	// a map collapses duplicates; only the keys matter
	seen := make(map[string]bool, len(in))
	for _, s := range in {
		seen[s] = true
	}

	out = make([]string, 0, len(seen))
	for s := range seen {
		out = append(out, s)
	}

	return
}
|
|
||||||
|
|
||||||
// TimeFunc logs how many seconds have elapsed since start, tagged with name.
// Not meant to be very sensitive/accurate, but good enough to gauge rough
// run times. Meant to be called as:
//
//	defer misc.TimeFunc(time.Now(), "foo")
func TimeFunc(start time.Time, name string) {
	log.Printf("%s completed in: %.2fs", name, time.Since(start).Seconds())
}
|
|
||||||
|
|
||||||
// Exists tests if the given file/dir exists or not. Returns any errors
// related to os.Stat if the type is *not* ErrNotExist. If an error is
// returned, then the value of the returned boolean cannot be trusted.
func Exists(file string) (bool, error) {
	_, err := os.Stat(file)
	if err == nil {
		return true, nil
	}
	if errors.Is(err, os.ErrNotExist) {
		// Don't return the error, the file doesn't exist which is OK
		return false, nil
	}

	// Other errors from os.Stat returned here
	return false, err
}
|
|
@@ -1,125 +0,0 @@
|
|||||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
package misc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestMerge(t *testing.T) {
|
|
||||||
subtests := []struct {
|
|
||||||
name string
|
|
||||||
inA map[string]string
|
|
||||||
inB map[string]string
|
|
||||||
expected map[string]string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "empty B",
|
|
||||||
inA: map[string]string{
|
|
||||||
"foo": "bar",
|
|
||||||
"banana": "airplane",
|
|
||||||
},
|
|
||||||
inB: map[string]string{},
|
|
||||||
expected: map[string]string{
|
|
||||||
"foo": "bar",
|
|
||||||
"banana": "airplane",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "empty A",
|
|
||||||
inA: map[string]string{},
|
|
||||||
inB: map[string]string{
|
|
||||||
"foo": "bar",
|
|
||||||
"banana": "airplane",
|
|
||||||
},
|
|
||||||
expected: map[string]string{
|
|
||||||
"foo": "bar",
|
|
||||||
"banana": "airplane",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "both populated, some duplicates",
|
|
||||||
inA: map[string]string{
|
|
||||||
"bar": "bazz",
|
|
||||||
"banana": "yellow",
|
|
||||||
"guava": "green",
|
|
||||||
},
|
|
||||||
inB: map[string]string{
|
|
||||||
"foo": "bar",
|
|
||||||
"banana": "airplane",
|
|
||||||
},
|
|
||||||
expected: map[string]string{
|
|
||||||
"foo": "bar",
|
|
||||||
"guava": "green",
|
|
||||||
"banana": "airplane",
|
|
||||||
"bar": "bazz",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, st := range subtests {
|
|
||||||
t.Run(st.name, func(t *testing.T) {
|
|
||||||
out := st.inA
|
|
||||||
Merge(out, st.inB)
|
|
||||||
if !reflect.DeepEqual(st.expected, out) {
|
|
||||||
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRemoveDuplicates(t *testing.T) {
|
|
||||||
subtests := []struct {
|
|
||||||
name string
|
|
||||||
in []string
|
|
||||||
expected []string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "no duplicates",
|
|
||||||
in: []string{
|
|
||||||
"foo",
|
|
||||||
"bar",
|
|
||||||
"banana",
|
|
||||||
"airplane",
|
|
||||||
},
|
|
||||||
expected: []string{
|
|
||||||
"foo",
|
|
||||||
"bar",
|
|
||||||
"banana",
|
|
||||||
"airplane",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "all duplicates",
|
|
||||||
in: []string{
|
|
||||||
"foo",
|
|
||||||
"foo",
|
|
||||||
"foo",
|
|
||||||
"foo",
|
|
||||||
},
|
|
||||||
expected: []string{
|
|
||||||
"foo",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "empty",
|
|
||||||
in: []string{},
|
|
||||||
expected: []string{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, st := range subtests {
|
|
||||||
t.Run(st.name, func(t *testing.T) {
|
|
||||||
// note: sorting to make comparison easier later
|
|
||||||
sort.Strings(st.expected)
|
|
||||||
out := RemoveDuplicates(st.in)
|
|
||||||
sort.Strings(out)
|
|
||||||
if !reflect.DeepEqual(st.expected, out) {
|
|
||||||
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
710
main.go
Normal file
710
main.go
Normal file
@@ -0,0 +1,710 @@
|
|||||||
|
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||||
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"debug/elf"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.sr.ht/~sircmpwn/getopt"
|
||||||
|
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/archive"
|
||||||
|
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
|
||||||
|
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// timeFunc logs how long has elapsed since start, tagged with name.
// Intended to be used as: defer timeFunc(time.Now(), "mkinitfs")
func timeFunc(start time.Time, name string) {
	log.Printf("%s completed in: %s", name, time.Since(start))
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
devinfo, err := deviceinfo.ReadDeviceinfo()
|
||||||
|
if err != nil {
|
||||||
|
log.Print("NOTE: deviceinfo (from device package) not installed yet, " +
|
||||||
|
"not building the initramfs now (it should get built later " +
|
||||||
|
"automatically.)")
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
var outDir string
|
||||||
|
getopt.StringVar(&outDir, "d", "/boot", "Directory to output initfs(-extra) and other boot files, default: /boot")
|
||||||
|
|
||||||
|
if err := getopt.Parse(); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
defer timeFunc(time.Now(), "mkinitfs")
|
||||||
|
|
||||||
|
kernVer, err := getKernelVersion()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// temporary working dir
|
||||||
|
workDir, err := ioutil.TempDir("", "mkinitfs")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal("Unable to create temporary work directory:", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(workDir)
|
||||||
|
|
||||||
|
log.Print("Generating for kernel version: ", kernVer)
|
||||||
|
log.Print("Output directory: ", outDir)
|
||||||
|
|
||||||
|
if err := generateInitfs("initramfs", workDir, kernVer, devinfo); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := generateInitfsExtra("initramfs-extra", workDir, devinfo); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Final processing of initramfs / kernel is done by boot-deploy
|
||||||
|
if err := bootDeploy(workDir, outDir); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func bootDeploy(workDir string, outDir string) error {
|
||||||
|
// boot-deploy expects the kernel to be in the same dir as initramfs.
|
||||||
|
// Assume that the kernel is in the output dir...
|
||||||
|
log.Print("== Using boot-deploy to finalize/install files ==")
|
||||||
|
kernels, _ := filepath.Glob(filepath.Join(outDir, "vmlinuz*"))
|
||||||
|
if len(kernels) == 0 {
|
||||||
|
return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
|
||||||
|
}
|
||||||
|
kernFile, err := os.Open(kernels[0])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer kernFile.Close()
|
||||||
|
|
||||||
|
kernFileCopy, err := os.Create(filepath.Join(workDir, "vmlinuz"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = io.Copy(kernFileCopy, kernFile); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
kernFileCopy.Close()
|
||||||
|
|
||||||
|
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
|
||||||
|
cmd := exec.Command("boot-deploy",
|
||||||
|
"-i", "initramfs",
|
||||||
|
"-k", "vmlinuz",
|
||||||
|
"-d", workDir,
|
||||||
|
"-o", outDir,
|
||||||
|
"initramfs-extra")
|
||||||
|
if !exists(cmd.Path) {
|
||||||
|
return errors.New("boot-deploy command not found.")
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
// err is ignored, since shellcheck will return != 0 if there are issues
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
log.Print("'boot-deploy' command failed: ")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func createInitfsRootDirs(initfsRoot string) {
|
||||||
|
dirs := []string{
|
||||||
|
"/bin", "/sbin", "/usr/bin", "/usr/lib", "/usr/sbin", "/proc", "/sys",
|
||||||
|
"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, dir := range dirs {
|
||||||
|
if err := os.MkdirAll(filepath.Join(initfsRoot, dir), os.FileMode(0775)); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func exists(file string) bool {
|
||||||
|
if _, err := os.Stat(file); err == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHookFiles(filesdir string) misc.StringSet {
|
||||||
|
fileInfo, err := ioutil.ReadDir(filesdir)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
files := make(misc.StringSet)
|
||||||
|
for _, file := range fileInfo {
|
||||||
|
path := filepath.Join(filesdir, file.Name())
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
s := bufio.NewScanner(f)
|
||||||
|
for s.Scan() {
|
||||||
|
if !exists(s.Text()) {
|
||||||
|
log.Fatalf("Unable to find file %q required by %q", s.Text(), path)
|
||||||
|
}
|
||||||
|
files[s.Text()] = false
|
||||||
|
}
|
||||||
|
if err := s.Err(); err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return files
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively list all dependencies for a given ELF binary
|
||||||
|
func getBinaryDeps(files misc.StringSet, file string) error {
|
||||||
|
// if file is a symlink, resolve dependencies for target
|
||||||
|
fileStat, err := os.Lstat(file)
|
||||||
|
if err != nil {
|
||||||
|
log.Print("getBinaryDeps: failed to stat file")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Symlink: write symlink to archive then set 'file' to link target
|
||||||
|
if fileStat.Mode()&os.ModeSymlink != 0 {
|
||||||
|
target, err := os.Readlink(file)
|
||||||
|
if err != nil {
|
||||||
|
log.Print("getBinaryDeps: unable to read symlink: ", file)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !filepath.IsAbs(target) {
|
||||||
|
target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := getBinaryDeps(files, target); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// get dependencies for binaries
|
||||||
|
fd, err := elf.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
libs, _ := fd.ImportedLibraries()
|
||||||
|
fd.Close()
|
||||||
|
files[file] = false
|
||||||
|
|
||||||
|
if len(libs) == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
libdirs := []string{"/usr/lib", "/lib"}
|
||||||
|
for _, lib := range libs {
|
||||||
|
found := false
|
||||||
|
for _, libdir := range libdirs {
|
||||||
|
path := filepath.Join(libdir, lib)
|
||||||
|
if _, err := os.Stat(path); err == nil {
|
||||||
|
err := getBinaryDeps(files, path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
files[path] = false
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
log.Fatalf("Unable to locate dependency for %q: %s", file, lib)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFiles(files misc.StringSet, newFiles misc.StringSet, required bool) error {
|
||||||
|
for file := range newFiles {
|
||||||
|
err := getFile(files, file, required)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFile(files misc.StringSet, file string, required bool) error {
|
||||||
|
if !exists(file) {
|
||||||
|
if required {
|
||||||
|
return errors.New("getFile: File does not exist :" + file)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
files[file] = false
|
||||||
|
|
||||||
|
// get dependencies for binaries
|
||||||
|
if _, err := elf.Open(file); err != nil {
|
||||||
|
// file is not an elf, so don't resolve lib dependencies
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err := getBinaryDeps(files, file)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOskConfFontPath(oskConfPath string) (string, error) {
|
||||||
|
var path string
|
||||||
|
f, err := os.Open(oskConfPath)
|
||||||
|
if err != nil {
|
||||||
|
return path, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
s := bufio.NewScanner(f)
|
||||||
|
for s.Scan() {
|
||||||
|
fields := strings.Fields(s.Text())
|
||||||
|
// "key = val" is 3 fields
|
||||||
|
if len(fields) > 2 && fields[0] == "keyboard-font" {
|
||||||
|
path = fields[2]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !exists(path) {
|
||||||
|
return path, errors.New("Unable to find font: " + path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return path, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a list of files and their dependencies related to supporting rootfs full
|
||||||
|
// disk (d)encryption
|
||||||
|
func getFdeFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
|
||||||
|
confFiles := misc.StringSet{
|
||||||
|
"/etc/osk.conf": false,
|
||||||
|
"/etc/ts.conf": false,
|
||||||
|
"/etc/pointercal": false,
|
||||||
|
"/etc/fb.modes": false,
|
||||||
|
"/etc/directfbrc": false,
|
||||||
|
}
|
||||||
|
// TODO: this shouldn't be false? though some files (pointercal) don't always exist...
|
||||||
|
if err := getFiles(files, confFiles, false); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// osk-sdl
|
||||||
|
oskFiles := misc.StringSet{
|
||||||
|
"/usr/bin/osk-sdl": false,
|
||||||
|
"/sbin/cryptsetup": false,
|
||||||
|
"/usr/lib/libGL.so.1": false}
|
||||||
|
if err := getFiles(files, oskFiles, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fontFile, err := getOskConfFontPath("/etc/osk.conf")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
files[fontFile] = false
|
||||||
|
|
||||||
|
// Directfb
|
||||||
|
dfbFiles := make(misc.StringSet)
|
||||||
|
err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
|
||||||
|
if filepath.Ext(path) == ".so" {
|
||||||
|
dfbFiles[path] = false
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Print("getBinaryDeps: failed to stat file")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := getFiles(files, dfbFiles, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// tslib
|
||||||
|
tslibFiles := make(misc.StringSet)
|
||||||
|
err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
|
||||||
|
if filepath.Ext(path) == ".so" {
|
||||||
|
tslibFiles[path] = false
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Print("getBinaryDeps: failed to stat file")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
libts, _ := filepath.Glob("/usr/lib/libts*")
|
||||||
|
for _, file := range libts {
|
||||||
|
tslibFiles[file] = false
|
||||||
|
}
|
||||||
|
if err = getFiles(files, tslibFiles, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// mesa hw accel
|
||||||
|
if devinfo.Deviceinfo_mesa_driver != "" {
|
||||||
|
mesaFiles := misc.StringSet{
|
||||||
|
"/usr/lib/libEGL.so.1": false,
|
||||||
|
"/usr/lib/libGLESv2.so.2": false,
|
||||||
|
"/usr/lib/libgbm.so.1": false,
|
||||||
|
"/usr/lib/libudev.so.1": false,
|
||||||
|
"/usr/lib/xorg/modules/dri/" + devinfo.Deviceinfo_mesa_driver + "_dri.so": false,
|
||||||
|
}
|
||||||
|
if err := getFiles(files, mesaFiles, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getHookScripts(files misc.StringSet) {
|
||||||
|
scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh")
|
||||||
|
for _, script := range scripts {
|
||||||
|
files[script] = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getInitfsExtraFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
|
||||||
|
log.Println("== Generating initramfs extra ==")
|
||||||
|
binariesExtra := misc.StringSet{
|
||||||
|
"/lib/libz.so.1": false,
|
||||||
|
"/sbin/dmsetup": false,
|
||||||
|
"/sbin/e2fsck": false,
|
||||||
|
"/usr/sbin/parted": false,
|
||||||
|
"/usr/sbin/resize2fs": false,
|
||||||
|
"/usr/sbin/resize.f2fs": false,
|
||||||
|
}
|
||||||
|
log.Println("- Including extra binaries")
|
||||||
|
if err := getFiles(files, binariesExtra, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if exists("/usr/bin/osk-sdl") {
|
||||||
|
log.Println("- Including FDE support")
|
||||||
|
if err := getFdeFiles(files, devinfo); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Println("- *NOT* including FDE support")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getInitfsFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
|
||||||
|
log.Println("== Generating initramfs ==")
|
||||||
|
requiredFiles := misc.StringSet{
|
||||||
|
"/bin/busybox": false,
|
||||||
|
"/bin/sh": false,
|
||||||
|
"/bin/busybox-extras": false,
|
||||||
|
"/usr/sbin/telnetd": false,
|
||||||
|
"/sbin/kpartx": false,
|
||||||
|
"/etc/deviceinfo": false,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hook files & scripts
|
||||||
|
if exists("/etc/postmarketos-mkinitfs/files") {
|
||||||
|
log.Println("- Including hook files")
|
||||||
|
hookFiles := getHookFiles("/etc/postmarketos-mkinitfs/files")
|
||||||
|
if err := getFiles(files, hookFiles, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Println("- Including hook scripts")
|
||||||
|
getHookScripts(files)
|
||||||
|
|
||||||
|
log.Println("- Including required binaries")
|
||||||
|
if err := getFiles(files, requiredFiles, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getInitfsModules(files misc.StringSet, devinfo deviceinfo.DeviceInfo, kernelVer string) error {
|
||||||
|
log.Println("- Including kernel modules")
|
||||||
|
|
||||||
|
modDir := filepath.Join("/lib/modules", kernelVer)
|
||||||
|
if !exists(modDir) {
|
||||||
|
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
|
||||||
|
log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// modules.* required by modprobe
|
||||||
|
modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
|
||||||
|
for _, file := range modprobeFiles {
|
||||||
|
files[file] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// module name (without extension), or directory (trailing slash is important! globs OK)
|
||||||
|
requiredModules := []string{
|
||||||
|
"loop",
|
||||||
|
"dm-crypt",
|
||||||
|
"kernel/fs/overlayfs/",
|
||||||
|
"kernel/crypto/",
|
||||||
|
"kernel/arch/*/crypto/",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, item := range requiredModules {
|
||||||
|
dir, file := filepath.Split(item)
|
||||||
|
if file == "" {
|
||||||
|
// item is a directory
|
||||||
|
dir = filepath.Join(modDir, dir)
|
||||||
|
dirs, _ := filepath.Glob(dir)
|
||||||
|
for _, d := range dirs {
|
||||||
|
if err := getModulesInDir(files, d); err != nil {
|
||||||
|
log.Print("Unable to get modules in dir: ", d)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
} else if dir == "" {
|
||||||
|
// item is a module name
|
||||||
|
if err := getModule(files, file, modDir); err != nil {
|
||||||
|
log.Print("Unable to get module: ", file)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
log.Printf("Unknown module entry: %q", item)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// deviceinfo modules
|
||||||
|
for _, module := range strings.Fields(devinfo.Deviceinfo_modules_initfs) {
|
||||||
|
if err := getModule(files, module, modDir); err != nil {
|
||||||
|
log.Print("Unable to get modules from deviceinfo")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// /etc/postmarketos-mkinitfs/modules/*.modules
|
||||||
|
initfsModFiles, _ := filepath.Glob("/etc/postmarketos-mkinitfs/modules/*.modules")
|
||||||
|
for _, modFile := range initfsModFiles {
|
||||||
|
f, err := os.Open(modFile)
|
||||||
|
if err != nil {
|
||||||
|
log.Print("getInitfsModules: unable to open mkinitfs modules file: ", modFile)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
s := bufio.NewScanner(f)
|
||||||
|
for s.Scan() {
|
||||||
|
if err := getModule(files, s.Text(), modDir); err != nil {
|
||||||
|
log.Print("getInitfsModules: unable to get module file: ", s.Text())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getKernelReleaseFile() (string, error) {
|
||||||
|
files, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
|
||||||
|
// only one kernel flavor supported
|
||||||
|
if len(files) != 1 {
|
||||||
|
return "", errors.New(fmt.Sprintf("Only one kernel release/flavor is supported, found: %q", files))
|
||||||
|
}
|
||||||
|
|
||||||
|
return files[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getKernelVersion() (string, error) {
|
||||||
|
var version string
|
||||||
|
|
||||||
|
releaseFile, err := getKernelReleaseFile()
|
||||||
|
if err != nil {
|
||||||
|
return version, err
|
||||||
|
}
|
||||||
|
|
||||||
|
contents, err := os.ReadFile(releaseFile)
|
||||||
|
if err != nil {
|
||||||
|
return version, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.TrimSpace(string(contents)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
|
||||||
|
initfsArchive, err := archive.New()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
requiredDirs := []string{
|
||||||
|
"/bin", "/sbin", "/usr/bin", "/usr/sbin", "/proc", "/sys",
|
||||||
|
"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
|
||||||
|
}
|
||||||
|
for _, dir := range requiredDirs {
|
||||||
|
initfsArchive.Dirs[dir] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := getInitfsFiles(initfsArchive.Files, devinfo); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := getInitfsModules(initfsArchive.Files, devinfo, kernVer); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// splash images
|
||||||
|
log.Println("- Including splash images")
|
||||||
|
splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz")
|
||||||
|
for _, file := range splashFiles {
|
||||||
|
// splash images are expected at /<file>
|
||||||
|
if err := initfsArchive.AddFile(file, filepath.Join("/", filepath.Base(file))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// initfs_functions
|
||||||
|
if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Println("- Writing and verifying initramfs archive")
|
||||||
|
if err := initfsArchive.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo) error {
|
||||||
|
initfsExtraArchive, err := archive.New()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := getInitfsExtraFiles(initfsExtraArchive.Files, devinfo); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Println("- Writing and verifying initramfs-extra archive")
|
||||||
|
if err := initfsExtraArchive.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func stripExts(file string) string {
|
||||||
|
for {
|
||||||
|
if filepath.Ext(file) == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
file = strings.TrimSuffix(file, filepath.Ext(file))
|
||||||
|
}
|
||||||
|
return file
|
||||||
|
}
|
||||||
|
|
||||||
|
func getModulesInDir(files misc.StringSet, modPath string) error {
|
||||||
|
err := filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
|
||||||
|
// TODO: need to support more extensions?
|
||||||
|
if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
files[path] = false
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
|
||||||
|
// file and all of its dependencies.
|
||||||
|
// Note: it's not necessarily fatal if the module is not found, since it may
|
||||||
|
// have been built into the kernel
|
||||||
|
// TODO: look for it in modules.builtin, and make it fatal if it can't be found
|
||||||
|
// anywhere
|
||||||
|
func getModule(files misc.StringSet, modName string, modDir string) error {
|
||||||
|
|
||||||
|
deps, err := getModuleDeps(modName, modDir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(deps) == 0 {
|
||||||
|
// retry and swap - and _ in module name
|
||||||
|
if strings.Contains(modName, "-") {
|
||||||
|
modName = strings.ReplaceAll(modName, "-", "_")
|
||||||
|
} else {
|
||||||
|
modName = strings.ReplaceAll(modName, "_", "-")
|
||||||
|
}
|
||||||
|
deps, err = getModuleDeps(modName, modDir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, dep := range deps {
|
||||||
|
p := filepath.Join(modDir, dep)
|
||||||
|
if !exists(p) {
|
||||||
|
log.Print(fmt.Sprintf("Tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
files[p] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func getModuleDeps(modName string, modDir string) ([]string, error) {
|
||||||
|
var deps []string
|
||||||
|
|
||||||
|
modDep := filepath.Join(modDir, "modules.dep")
|
||||||
|
if !exists(modDep) {
|
||||||
|
log.Fatal("Kernel module.dep not found: ", modDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
fd, err := os.Open(modDep)
|
||||||
|
if err != nil {
|
||||||
|
log.Print("Unable to open modules.dep: ", modDep)
|
||||||
|
return deps, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer fd.Close()
|
||||||
|
s := bufio.NewScanner(fd)
|
||||||
|
for s.Scan() {
|
||||||
|
fields := strings.Fields(s.Text())
|
||||||
|
fields[0] = strings.TrimSuffix(fields[0], ":")
|
||||||
|
if modName != filepath.Base(stripExts(fields[0])) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, modPath := range fields {
|
||||||
|
deps = append(deps, modPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := s.Err(); err != nil {
|
||||||
|
log.Print("Unable to get module + dependencies: ", modName)
|
||||||
|
return deps, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return deps, nil
|
||||||
|
}
|
26
main_test.go
Normal file
26
main_test.go
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||||
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStripExts(t *testing.T) {
|
||||||
|
tables := []struct {
|
||||||
|
in string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{"/foo/bar/bazz.tar", "/foo/bar/bazz"},
|
||||||
|
{"file.tar.gz.xz.zip", "file"},
|
||||||
|
{"another_file", "another_file"},
|
||||||
|
{"a.b.c.d.e.f.g.h.i", "a"},
|
||||||
|
{"virtio_blk.ko", "virtio_blk"},
|
||||||
|
}
|
||||||
|
for _, table := range tables {
|
||||||
|
out := stripExts(table.in)
|
||||||
|
if out != table.expected {
|
||||||
|
t.Errorf("Expected: %q, got: %q", table.expected, out)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
275
pkgs/archive/archive.go
Normal file
275
pkgs/archive/archive.go
Normal file
@@ -0,0 +1,275 @@
|
|||||||
|
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||||
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
package archive
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/flate"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"github.com/cavaliercoder/go-cpio"
|
||||||
|
"github.com/klauspost/pgzip"
|
||||||
|
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Archive struct {
|
||||||
|
Dirs misc.StringSet
|
||||||
|
Files misc.StringSet
|
||||||
|
cpioWriter *cpio.Writer
|
||||||
|
buf *bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func New() (*Archive, error) {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
archive := &Archive{
|
||||||
|
cpioWriter: cpio.NewWriter(buf),
|
||||||
|
Files: make(misc.StringSet),
|
||||||
|
Dirs: make(misc.StringSet),
|
||||||
|
buf: buf,
|
||||||
|
}
|
||||||
|
|
||||||
|
return archive, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (archive *Archive) Write(path string, mode os.FileMode) error {
|
||||||
|
if err := archive.writeCpio(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := archive.cpioWriter.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write archive to path
|
||||||
|
if err := archive.writeCompressed(path, mode); err != nil {
|
||||||
|
log.Print("Unable to write archive to location: ", path)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// test the archive to make sure it's valid
|
||||||
|
if err := test(path); err != nil {
|
||||||
|
log.Print("Verification of archive failed!")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.Chmod(path, mode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checksum(path string) (string, error) {
|
||||||
|
var sum string
|
||||||
|
|
||||||
|
buf := make([]byte, 64*1024)
|
||||||
|
sha256 := sha256.New()
|
||||||
|
fd, err := os.Open(path)
|
||||||
|
defer fd.Close()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Print("Unable to checksum: ", path)
|
||||||
|
return sum, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read file in chunks
|
||||||
|
for {
|
||||||
|
bytes, err := fd.Read(buf)
|
||||||
|
if bytes > 0 {
|
||||||
|
_, err := sha256.Write(buf[:bytes])
|
||||||
|
if err != nil {
|
||||||
|
log.Print("Unable to checksum: ", path)
|
||||||
|
return sum, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sum = hex.EncodeToString(sha256.Sum(nil))
|
||||||
|
return sum, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (archive *Archive) AddFile(file string, dest string) error {
|
||||||
|
if err := archive.addDir(filepath.Dir(dest)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if archive.Files[file] {
|
||||||
|
// Already written to cpio
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fileStat, err := os.Lstat(file)
|
||||||
|
if err != nil {
|
||||||
|
log.Print("AddFile: failed to stat file: ", file)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Symlink: write symlink to archive then set 'file' to link target
|
||||||
|
if fileStat.Mode()&os.ModeSymlink != 0 {
|
||||||
|
// log.Printf("File %q is a symlink", file)
|
||||||
|
target, err := os.Readlink(file)
|
||||||
|
if err != nil {
|
||||||
|
log.Print("AddFile: failed to get symlink target: ", file)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
destFilename := strings.TrimPrefix(dest, "/")
|
||||||
|
hdr := &cpio.Header{
|
||||||
|
Name: destFilename,
|
||||||
|
Linkname: target,
|
||||||
|
Mode: 0644 | cpio.ModeSymlink,
|
||||||
|
Size: int64(len(target)),
|
||||||
|
// Checksum: 1,
|
||||||
|
}
|
||||||
|
if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err = archive.cpioWriter.Write([]byte(target)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
archive.Files[file] = true
|
||||||
|
if filepath.Dir(target) == "." {
|
||||||
|
target = filepath.Join(filepath.Dir(file), target)
|
||||||
|
}
|
||||||
|
// make sure target is an absolute path
|
||||||
|
if !filepath.IsAbs(target) {
|
||||||
|
target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
|
||||||
|
}
|
||||||
|
// TODO: add verbose mode, print stuff like this:
|
||||||
|
// log.Printf("symlink: %q, target: %q", file, target)
|
||||||
|
// write symlink target
|
||||||
|
err = archive.AddFile(target, target)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// log.Printf("writing file: %q", file)
|
||||||
|
|
||||||
|
fd, err := os.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer fd.Close()
|
||||||
|
|
||||||
|
destFilename := strings.TrimPrefix(dest, "/")
|
||||||
|
hdr := &cpio.Header{
|
||||||
|
Name: destFilename,
|
||||||
|
Mode: cpio.FileMode(fileStat.Mode().Perm()),
|
||||||
|
Size: fileStat.Size(),
|
||||||
|
// Checksum: 1,
|
||||||
|
}
|
||||||
|
if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = io.Copy(archive.cpioWriter, fd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
archive.Files[file] = true
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use busybox gzip to test archive
|
||||||
|
func test(path string) error {
|
||||||
|
cmd := exec.Command("busybox", "gzip", "-t", path)
|
||||||
|
cmd.Stdout = os.Stdout
|
||||||
|
cmd.Stderr = os.Stderr
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
log.Print("'boot-deploy' command failed: ")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (archive *Archive) writeCompressed(path string, mode os.FileMode) error {
|
||||||
|
// TODO: support other compression formats, based on deviceinfo
|
||||||
|
fd, err := os.Create(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
gz, err := pgzip.NewWriterLevel(fd, flate.BestSpeed)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = io.Copy(gz, archive.buf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := gz.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// call fsync just to be sure
|
||||||
|
if err := fd.Sync(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.Chmod(path, mode); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (archive *Archive) writeCpio() error {
|
||||||
|
// Write any dirs added explicitly
|
||||||
|
for dir := range archive.Dirs {
|
||||||
|
archive.addDir(dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write files and any missing parent dirs
|
||||||
|
for file, imported := range archive.Files {
|
||||||
|
if imported {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := archive.AddFile(file, file); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (archive *Archive) addDir(dir string) error {
|
||||||
|
if archive.Dirs[dir] {
|
||||||
|
// Already imported
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if dir == "/" {
|
||||||
|
dir = "."
|
||||||
|
}
|
||||||
|
|
||||||
|
subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
|
||||||
|
for i, subdir := range subdirs {
|
||||||
|
path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
|
||||||
|
if archive.Dirs[path] {
|
||||||
|
// Subdir already imported
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
err := archive.cpioWriter.WriteHeader(&cpio.Header{
|
||||||
|
Name: path,
|
||||||
|
Mode: cpio.ModeDir | 0755,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
archive.Dirs[path] = true
|
||||||
|
// log.Print("wrote dir: ", path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
@@ -1,118 +1,53 @@
|
|||||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
|
||||||
package deviceinfo
|
package deviceinfo
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"errors"
|
||||||
"fmt"
|
"github.com/BurntSushi/toml"
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Note: fields must be exported (start with capital letter)
|
||||||
|
// https://github.com/BurntSushi/toml/issues/121
|
||||||
type DeviceInfo struct {
|
type DeviceInfo struct {
|
||||||
InitfsCompression string
|
Deviceinfo_append_dtb string
|
||||||
InitfsExtraCompression string
|
Deviceinfo_arch string
|
||||||
UbootBoardname string
|
Deviceinfo_bootimg_append_seandroidenforce string
|
||||||
GenerateSystemdBoot string
|
Deviceinfo_bootimg_blobpack string
|
||||||
|
Deviceinfo_bootimg_dtb_second string
|
||||||
|
Deviceinfo_bootimg_mtk_mkimage string
|
||||||
|
Deviceinfo_bootimg_pxa string
|
||||||
|
Deviceinfo_bootimg_qcdt string
|
||||||
|
Deviceinfo_dtb string
|
||||||
|
Deviceinfo_flash_offset_base string
|
||||||
|
Deviceinfo_flash_offset_kernel string
|
||||||
|
Deviceinfo_flash_offset_ramdisk string
|
||||||
|
Deviceinfo_flash_offset_second string
|
||||||
|
Deviceinfo_flash_offset_tags string
|
||||||
|
Deviceinfo_flash_pagesize string
|
||||||
|
Deviceinfo_generate_bootimg string
|
||||||
|
Deviceinfo_generate_legacy_uboot_initfs string
|
||||||
|
Deviceinfo_mesa_driver string
|
||||||
|
Deviceinfo_mkinitfs_postprocess string
|
||||||
|
Deviceinfo_initfs_compression string
|
||||||
|
Deviceinfo_kernel_cmdline string
|
||||||
|
Deviceinfo_legacy_uboot_load_address string
|
||||||
|
Deviceinfo_modules_initfs string
|
||||||
|
Deviceinfo_flash_kernel_on_update string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reads the relevant entries from "file" into DeviceInfo struct
|
func ReadDeviceinfo() (DeviceInfo, error) {
|
||||||
// Any already-set entries will be overwriten if they are present
|
file := "/etc/deviceinfo"
|
||||||
// in "file"
|
var deviceinfo DeviceInfo
|
||||||
func (d *DeviceInfo) ReadDeviceinfo(file string) error {
|
|
||||||
if exists, err := misc.Exists(file); !exists {
|
|
||||||
return fmt.Errorf("%q not found, required by mkinitfs", file)
|
|
||||||
} else if err != nil {
|
|
||||||
return fmt.Errorf("unexpected error getting status for %q: %s", file, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
fd, err := os.Open(file)
|
_, err := os.Stat(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return deviceinfo, errors.New("Unable to find deviceinfo: " + file)
|
||||||
}
|
|
||||||
defer fd.Close()
|
|
||||||
|
|
||||||
if err := d.unmarshal(fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
if _, err := toml.DecodeFile(file, &deviceinfo); err != nil {
|
||||||
|
return deviceinfo, err
|
||||||
}
|
}
|
||||||
|
return deviceinfo, nil
|
||||||
// Unmarshals a deviceinfo into a DeviceInfo struct
|
|
||||||
func (d *DeviceInfo) unmarshal(r io.Reader) error {
|
|
||||||
s := bufio.NewScanner(r)
|
|
||||||
for s.Scan() {
|
|
||||||
line := s.Text()
|
|
||||||
if strings.HasPrefix(line, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// line isn't setting anything, so just ignore it
|
|
||||||
if !strings.Contains(line, "=") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// sometimes line has a comment at the end after setting an option
|
|
||||||
line = strings.SplitN(line, "#", 2)[0]
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
|
|
||||||
// must support having '=' in the value (e.g. kernel cmdline)
|
|
||||||
parts := strings.SplitN(line, "=", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
name, val := parts[0], parts[1]
|
|
||||||
val = strings.ReplaceAll(val, "\"", "")
|
|
||||||
|
|
||||||
if name == "deviceinfo_format_version" && val != "0" {
|
|
||||||
return fmt.Errorf("deviceinfo format version %q is not supported", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldName := nameToField(name)
|
|
||||||
|
|
||||||
if fieldName == "" {
|
|
||||||
return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
field := reflect.ValueOf(d).Elem().FieldByName(fieldName)
|
|
||||||
if !field.IsValid() {
|
|
||||||
// an option that meets the deviceinfo "specification", but isn't
|
|
||||||
// one we care about in this module
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
field.SetString(val)
|
|
||||||
}
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
log.Print("unable to parse deviceinfo: ", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert string into the string format used for DeviceInfo fields.
|
|
||||||
// Note: does not test that the resulting field name is a valid field in the
|
|
||||||
// DeviceInfo struct!
|
|
||||||
func nameToField(name string) string {
|
|
||||||
var field string
|
|
||||||
parts := strings.Split(name, "_")
|
|
||||||
for _, p := range parts {
|
|
||||||
if p == "deviceinfo" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(p) < 1 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
field = field + strings.ToUpper(p[:1]) + p[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
return field
|
|
||||||
}
|
}
|
||||||
|
@@ -1,96 +0,0 @@
|
|||||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
|
||||||
|
|
||||||
package deviceinfo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Test ReadDeviceinfo and the logic of reading from multiple files
|
|
||||||
func TestReadDeviceinfo(t *testing.T) {
|
|
||||||
compression_expected := "gz -9"
|
|
||||||
|
|
||||||
var devinfo DeviceInfo
|
|
||||||
err := devinfo.ReadDeviceinfo("./test_resources/deviceinfo-missing")
|
|
||||||
if !strings.Contains(err.Error(), "required by mkinitfs") {
|
|
||||||
t.Errorf("received an unexpected err: %s", err)
|
|
||||||
}
|
|
||||||
err = devinfo.ReadDeviceinfo("./test_resources/deviceinfo-first")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("received an unexpected err: %s", err)
|
|
||||||
}
|
|
||||||
err = devinfo.ReadDeviceinfo("./test_resources/deviceinfo-msm")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("received an unexpected err: %s", err)
|
|
||||||
}
|
|
||||||
if devinfo.InitfsCompression != compression_expected {
|
|
||||||
t.Errorf("expected %q, got: %q", compression_expected, devinfo.InitfsCompression)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test conversion of name to DeviceInfo struct field format
|
|
||||||
func TestNameToField(t *testing.T) {
|
|
||||||
tables := []struct {
|
|
||||||
in string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{"deviceinfo_dtb", "Dtb"},
|
|
||||||
{"dtb", "Dtb"},
|
|
||||||
{"deviceinfo_initfs_compression", "InitfsCompression"},
|
|
||||||
{"modules_initfs", "ModulesInitfs"},
|
|
||||||
{"deviceinfo_initfs_compression___", "InitfsCompression"},
|
|
||||||
{"deviceinfo_initfs_extra_compression", "InitfsExtraCompression"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, table := range tables {
|
|
||||||
out := nameToField(table.in)
|
|
||||||
if out != table.expected {
|
|
||||||
t.Errorf("expected: %q, got: %q", table.expected, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test unmarshalling with lines in deviceinfo
|
|
||||||
func TestUnmarshal(t *testing.T) {
|
|
||||||
tables := []struct {
|
|
||||||
// field is just used for reflection within the test, so it must be a
|
|
||||||
// valid DeviceInfo field
|
|
||||||
field string
|
|
||||||
in string
|
|
||||||
expected string
|
|
||||||
}{
|
|
||||||
{"InitfsCompression", "deviceinfo_initfs_compression=\"gzip:-9\"\n", "gzip:-9"},
|
|
||||||
// line with multiple '='
|
|
||||||
{"InitfsCompression", "deviceinfo_initfs_compression=zstd:--foo=1 -T0 --bar=bazz", "zstd:--foo=1 -T0 --bar=bazz"},
|
|
||||||
// empty option
|
|
||||||
{"InitfsCompression", "deviceinfo_initfs_compression=\"\"\n", ""},
|
|
||||||
// line with comment at the end
|
|
||||||
{"", "# this is a comment!\n", ""},
|
|
||||||
// empty lines are fine
|
|
||||||
{"", "", ""},
|
|
||||||
// line with whitepace characters only
|
|
||||||
{"", " \t \n\r", ""},
|
|
||||||
}
|
|
||||||
var d DeviceInfo
|
|
||||||
for _, table := range tables {
|
|
||||||
testName := fmt.Sprintf("unmarshal::'%s':", strings.ReplaceAll(table.in, "\n", "\\n"))
|
|
||||||
if err := d.unmarshal(strings.NewReader(table.in)); err != nil {
|
|
||||||
t.Errorf("%s received an unexpected err: ", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check against expected value
|
|
||||||
field := reflect.ValueOf(&d).Elem().FieldByName(table.field)
|
|
||||||
out := ""
|
|
||||||
if table.field != "" {
|
|
||||||
out = field.String()
|
|
||||||
}
|
|
||||||
if out != table.expected {
|
|
||||||
t.Errorf("%s expected: %q, got: %q", testName, table.expected, out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
@@ -1,2 +0,0 @@
|
|||||||
deviceinfo_initfs_compression="gz -9"
|
|
||||||
deviceinfo_mesa_driver="panfrost"
|
|
@@ -1 +0,0 @@
|
|||||||
deviceinfo_mesa_driver="msm"
|
|
@@ -1,15 +1,16 @@
|
|||||||
package osutil
|
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||||
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
package misc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"golang.org/x/sys/unix"
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type StringSet map[string]bool
|
||||||
|
|
||||||
// Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
|
// Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
|
||||||
// absolute path
|
// absolute path
|
||||||
func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
|
func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
|
||||||
@@ -46,29 +47,3 @@ func FreeSpace(path string) (uint64, error) {
|
|||||||
size := stat.Bavail * uint64(stat.Bsize)
|
size := stat.Bavail * uint64(stat.Bsize)
|
||||||
return size, nil
|
return size, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getKernelReleaseFile() (string, error) {
|
|
||||||
files, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
|
|
||||||
// only one kernel flavor supported
|
|
||||||
if len(files) != 1 {
|
|
||||||
return "", fmt.Errorf("only one kernel release/flavor is supported, found: %q", files)
|
|
||||||
}
|
|
||||||
|
|
||||||
return files[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetKernelVersion() (string, error) {
|
|
||||||
var version string
|
|
||||||
|
|
||||||
releaseFile, err := getKernelReleaseFile()
|
|
||||||
if err != nil {
|
|
||||||
return version, err
|
|
||||||
}
|
|
||||||
|
|
||||||
contents, err := os.ReadFile(releaseFile)
|
|
||||||
if err != nil {
|
|
||||||
return version, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.TrimSpace(string(contents)), nil
|
|
||||||
}
|
|
Reference in New Issue
Block a user