Compare commits
149 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
80098d29c6 | ||
|
67f1839ddc | ||
|
baf76ed614 | ||
|
27e271b904 | ||
|
1ac85b12fe | ||
|
f7f42bc2d4 | ||
|
c62a1f9ddb | ||
|
c9de619f98 | ||
|
a519769979 | ||
|
128a48dd24 | ||
|
499136e83a | ||
|
78f8fa32fb | ||
|
d03257981f | ||
|
307fb1889f | ||
|
fa3d3268d7 | ||
|
8b67848d5c | ||
|
31ab72edbc | ||
|
bd239c0365 | ||
|
a4c3b9ff96 | ||
|
8f505ffdc8 | ||
|
fb00e9e94b | ||
|
7c2377d0c8 | ||
|
f24d0139c9 | ||
|
5e2f975bd3 | ||
|
786e09d855 | ||
|
ba1e1a77db | ||
|
fd11f4a627 | ||
|
322d6bb754 | ||
|
1f4d8737e8 | ||
|
52fc741ba8 | ||
|
31b7eb34ee | ||
|
4e97990804 | ||
|
c01b48ad25 | ||
|
6aec4d564c | ||
|
6eb01e91e6 | ||
|
790cf47060 | ||
|
4074eada55 | ||
|
a7c4fe83ce | ||
|
06f86aadc9 | ||
|
d87a33a751 | ||
|
d1e150242d | ||
|
5968622f60 | ||
|
0179a0ca5c | ||
|
33c61b3c94 | ||
|
e4fb6cef70 | ||
|
4ae678d8ce | ||
|
71c2a87d56 | ||
|
9bb326be91 | ||
|
0545d68b1d | ||
|
c6e79551f4 | ||
|
a9f4281fbd | ||
|
bb50041257 | ||
|
09c897e737 | ||
|
a8bb10ce9c | ||
|
5e65ace958 | ||
|
cbcd4408e3 | ||
|
ad560591e1 | ||
|
89f1e067da | ||
|
4259478755 | ||
|
347668caa3 | ||
|
b0e28b4215 | ||
|
c1d96f699c | ||
|
25c3c03e24 | ||
|
07c8c711c7 | ||
|
e772fe0c87 | ||
|
6f05222018 | ||
|
c23af8b541 | ||
|
bd09de9232 | ||
|
22692e48d2 | ||
|
6c2f7b972b | ||
|
e5002f5750 | ||
|
662f559286 | ||
|
a4be663e13 | ||
|
14873015c0 | ||
|
6fdc8937b5 | ||
|
fb52066d8f | ||
|
b7f520cba4 | ||
|
31bf38f663 | ||
|
71d8131bb0 | ||
|
8b99b5f45b | ||
|
e8854ff88d | ||
|
1eb35cf8ef | ||
|
696633629a | ||
|
d9b68843a3 | ||
|
93005527e0 | ||
|
1c5f16762f | ||
|
af97d4654f | ||
|
b25c9bd390 | ||
|
1a0d00e39f | ||
|
af3c47c784 | ||
|
e7bbd1cadf | ||
|
1531d7e790 | ||
|
6d77b7a2d1 | ||
|
2dd83da480 | ||
|
e00e5faf6e | ||
|
5e07b63084 | ||
|
95582ee034 | ||
|
94584050ee | ||
|
e0977b4ac1 | ||
|
4176a8a661 | ||
|
73fd85f68c | ||
|
7e80107bbe | ||
|
f714f110a1 | ||
|
690d008643 | ||
|
731a805a9e | ||
|
b90624d7dd | ||
|
2a75cf9b4e | ||
|
d52cc16c88 | ||
|
112b572dc2 | ||
|
0c0a85f3bb | ||
|
2761535e12 | ||
|
1a72589f6f | ||
|
df0b5d66d7 | ||
|
c5f1cffca5 | ||
|
7eed20e35f | ||
|
e71cab485d | ||
|
568fe7f717 | ||
|
d78c6d5a62 | ||
|
c774b610d4 | ||
|
1e00f8f1cc | ||
|
28eed4fd12 | ||
|
c9ac9d9dd6 | ||
|
a4927a8915 | ||
|
029bdd849d | ||
|
8d21ae79c0 | ||
|
4278763cdb | ||
|
a6165b3a8c | ||
|
0eacd26615 | ||
|
e926bb301c | ||
|
961c455d59 | ||
|
4f601087e1 | ||
|
8b18e444a3 | ||
|
62c52e749e | ||
|
463ff1a7e4 | ||
|
3787944141 | ||
|
584a8e4e2a | ||
|
cdf41938b0 | ||
|
3d02037e3a | ||
|
6e2b4af336 | ||
|
9843f8a9c3 | ||
|
4b8a0a0d18 | ||
|
338c89504f | ||
|
c07eafd087 | ||
|
206e75c597 | ||
|
7a61e5126c | ||
|
0925cbd8ac | ||
|
866f17b86f | ||
|
15e99c3658 | ||
|
6400871749 |
6
.gitignore
vendored
6
.gitignore
vendored
@@ -1 +1,5 @@
|
||||
/postmarketos-mkinitfs
|
||||
/*.1
|
||||
/*.tar.gz
|
||||
/*.sha512
|
||||
/mkinitfs
|
||||
/vendor
|
||||
|
@@ -3,9 +3,15 @@
|
||||
# global settings
|
||||
image: alpine:edge
|
||||
|
||||
variables:
|
||||
GOFLAGS: "-buildvcs=false"
|
||||
PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/mkinitfs-vendor-${CI_COMMIT_TAG}/${CI_COMMIT_TAG}"
|
||||
|
||||
stages:
|
||||
- lint
|
||||
- build
|
||||
- vendor
|
||||
- release
|
||||
|
||||
# defaults for "only"
|
||||
# We need to run the CI jobs in a "merge request specific context", if CI is
|
||||
@@ -21,25 +27,37 @@ stages:
|
||||
- merge_requests
|
||||
- tags
|
||||
|
||||
# device documentation
|
||||
gofmt linting:
|
||||
stage: lint
|
||||
allow_failure: true
|
||||
<<: *only-default
|
||||
before_script:
|
||||
# specific mirror used because staticcheck hasn't made it to the other mirrors yet...
|
||||
- apk -q update --repository http://dl-4.alpinelinux.org/alpine/edge/testing
|
||||
- apk -q add --repository http://dl-4.alpinelinux.org/alpine/edge/testing go staticcheck
|
||||
script:
|
||||
- .gitlab-ci/check_linting.sh
|
||||
|
||||
build:
|
||||
stage: build
|
||||
<<: *only-default
|
||||
before_script:
|
||||
- apk -q add go
|
||||
- apk -q add go staticcheck make scdoc
|
||||
script:
|
||||
- go build -v
|
||||
- go test ./...
|
||||
- make test
|
||||
- make
|
||||
artifacts:
|
||||
expire_in: 1 week
|
||||
|
||||
vendor:
|
||||
stage: vendor
|
||||
image: alpine:latest
|
||||
only:
|
||||
- tags
|
||||
before_script:
|
||||
- apk -q add curl go make
|
||||
script:
|
||||
- |
|
||||
make VERSION="${CI_COMMIT_TAG}" vendor
|
||||
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file "mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz" "${PACKAGE_REGISTRY_URL}/"
|
||||
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file "mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512" "${PACKAGE_REGISTRY_URL}/"
|
||||
|
||||
release:
|
||||
stage: release
|
||||
image: registry.gitlab.com/gitlab-org/release-cli:latest
|
||||
only:
|
||||
- tags
|
||||
script:
|
||||
- |
|
||||
release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
|
||||
--assets-link "{\"name\":\"mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz\",\"url\":\"${PACKAGE_REGISTRY_URL}/mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz\"}" \
|
||||
--assets-link "{\"name\":\"mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512\",\"url\":\"${PACKAGE_REGISTRY_URL}/mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512\"}"
|
||||
|
@@ -1,13 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
echo "### Running gofmt..."
|
||||
files="$(gofmt -l .)"
|
||||
|
||||
if [ ! -z "$files" ]; then
|
||||
# run gofmt to print out the diff of what needs to be changed
|
||||
gofmt -d -e .
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "### Running staticcheck..."
|
||||
staticcheck ./...
|
74
Makefile
Normal file
74
Makefile
Normal file
@@ -0,0 +1,74 @@
|
||||
.POSIX:
|
||||
.SUFFIXES: .1 .1.scd
|
||||
|
||||
VERSION?=$(shell git describe --tags --dirty 2>/dev/null || echo 0.0.0)
|
||||
VPATH=doc
|
||||
VENDORED="mkinitfs-vendor-$(VERSION)"
|
||||
PREFIX?=/usr/local
|
||||
BINDIR?=$(PREFIX)/sbin
|
||||
MANDIR?=$(PREFIX)/share/man
|
||||
SHAREDIR?=$(PREFIX)/share
|
||||
GO?=go
|
||||
GOFLAGS?=
|
||||
LDFLAGS+=-s -w -X main.Version=$(VERSION)
|
||||
RM?=rm -f
|
||||
GOTEST=go test -count=1 -race
|
||||
|
||||
GOSRC!=find * -name '*.go'
|
||||
GOSRC+=go.mod go.sum
|
||||
|
||||
DOCS := \
|
||||
mkinitfs.1
|
||||
|
||||
all: mkinitfs $(DOCS)
|
||||
|
||||
mkinitfs: $(GOSRC)
|
||||
$(GO) build $(GOFLAGS) -ldflags "$(LDFLAGS)" -o mkinitfs ./cmd/mkinitfs
|
||||
|
||||
.1.scd.1:
|
||||
scdoc < $< > $@
|
||||
|
||||
doc: $(DOCS)
|
||||
|
||||
.PHONY: fmt
|
||||
fmt:
|
||||
gofmt -w .
|
||||
|
||||
test:
|
||||
@if [ `gofmt -l . | wc -l` -ne 0 ]; then \
|
||||
gofmt -d .; \
|
||||
echo "ERROR: source files need reformatting with gofmt"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@staticcheck ./...
|
||||
|
||||
@$(GOTEST) ./...
|
||||
|
||||
clean:
|
||||
$(RM) mkinitfs $(DOCS)
|
||||
$(RM) $(VENDORED)*
|
||||
|
||||
install: $(DOCS) mkinitfs
|
||||
install -Dm755 mkinitfs -t $(DESTDIR)$(BINDIR)/
|
||||
install -Dm644 mkinitfs.1 -t $(DESTDIR)$(MANDIR)/man1/
|
||||
|
||||
.PHONY: checkinstall
|
||||
checkinstall:
|
||||
test -e $(DESTDIR)$(BINDIR)/mkinitfs
|
||||
test -e $(DESTDIR)$(MANDIR)/man1/mkinitfs.1
|
||||
|
||||
RMDIR_IF_EMPTY:=sh -c '! [ -d $$0 ] || ls -1qA $$0 | grep -q . || rmdir $$0'
|
||||
|
||||
vendor:
|
||||
go mod vendor
|
||||
tar czf $(VENDORED).tar.gz vendor/
|
||||
sha512sum $(VENDORED).tar.gz > $(VENDORED).tar.gz.sha512
|
||||
$(RM) -rf vendor
|
||||
|
||||
uninstall:
|
||||
$(RM) $(DESTDIR)$(BINDIR)/mkinitfs
|
||||
${RMDIR_IF_EMPTY} $(DESTDIR)$(BINDIR)
|
||||
$(RM) $(DESTDIR)$(MANDIR)/man1/mkinitfs.1
|
||||
$(RMDIR_IF_EMPTY) $(DESTDIR)$(MANDIR)/man1
|
||||
|
||||
.PHONY: all clean install uninstall test vendor
|
48
README.md
Normal file
48
README.md
Normal file
@@ -0,0 +1,48 @@
|
||||
`mkinitfs` is a tool for generating an initramfs. It was originally designed
|
||||
for postmarketOS, but a long term design goal is to be as distro-agnostic as
|
||||
possible. It's capable of generating a split initramfs, in the style used by
|
||||
postmarketOS, and supports running `boot-deploy` to install/finalize boot files
|
||||
on a device.
|
||||
|
||||
## Building
|
||||
|
||||
Building this project requires a Go compiler/toolchain and `make`:
|
||||
|
||||
```
|
||||
$ make
|
||||
```
|
||||
|
||||
To install locally:
|
||||
|
||||
```
|
||||
$ make install
|
||||
```
|
||||
|
||||
Installation prefix can be set in the generally accepted way with setting
|
||||
`PREFIX`:
|
||||
|
||||
```
|
||||
$ make PREFIX=/some/location
|
||||
# make PREFIX=/some/location install
|
||||
```
|
||||
|
||||
Other paths can be modified from the command line as well, see the top section of
|
||||
the `Makefile` for more information.
|
||||
|
||||
Tests (functional and linting) can be executed by using the `test` make target:
|
||||
|
||||
```
|
||||
$ make test
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
The tool can be run with no options:
|
||||
|
||||
```
|
||||
# mkinitfs
|
||||
```
|
||||
|
||||
Configuration is done through a series of flat text files that list directories
|
||||
and files, and by placing scripts in specific directories. See `man 1 mkinitfs`
|
||||
for more information.
|
171
cmd/mkinitfs/main.go
Normal file
171
cmd/mkinitfs/main.go
Normal file
@@ -0,0 +1,171 @@
|
||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/bootdeploy"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookdirs"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookfiles"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookscripts"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/initramfs"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/modules"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/osksdl"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
|
||||
)
|
||||
|
||||
// set at build time
|
||||
var Version string
|
||||
|
||||
func main() {
|
||||
retCode := 0
|
||||
defer func() { os.Exit(retCode) }()
|
||||
|
||||
outDir := flag.String("d", "/boot", "Directory to output initfs(-extra) and other boot files")
|
||||
|
||||
var showVersion bool
|
||||
flag.BoolVar(&showVersion, "version", false, "Print version and quit.")
|
||||
|
||||
var disableBootDeploy bool
|
||||
flag.BoolVar(&disableBootDeploy, "no-bootdeploy", false, "Disable running 'boot-deploy' after generating archives.")
|
||||
flag.Parse()
|
||||
|
||||
if showVersion {
|
||||
fmt.Printf("%s - %s\n", filepath.Base(os.Args[0]), Version)
|
||||
return
|
||||
}
|
||||
|
||||
log.Default().SetFlags(log.Lmicroseconds)
|
||||
|
||||
deviceinfoFile := "/etc/deviceinfo"
|
||||
if exists, err := misc.Exists(deviceinfoFile); !exists {
|
||||
log.Printf("NOTE: %q not found, this file is required by mkinitfs.\n", deviceinfoFile)
|
||||
return
|
||||
} else if err != nil {
|
||||
retCode = 1
|
||||
log.Printf("received unexpected error when getting status for %q: %s", deviceinfoFile, err)
|
||||
}
|
||||
|
||||
devinfo, err := deviceinfo.ReadDeviceinfo(deviceinfoFile)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
|
||||
defer misc.TimeFunc(time.Now(), "mkinitfs")
|
||||
|
||||
kernVer, err := osutil.GetKernelVersion()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
|
||||
// temporary working dir
|
||||
workDir, err := os.MkdirTemp("", "mkinitfs")
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
log.Println("unable to create temporary work directory")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
e := os.RemoveAll(workDir)
|
||||
if e != nil && err == nil {
|
||||
err = e
|
||||
retCode = 1
|
||||
}
|
||||
}()
|
||||
|
||||
log.Print("Generating for kernel version: ", kernVer)
|
||||
log.Print("Output directory: ", *outDir)
|
||||
|
||||
// deviceinfo.InitfsCompression needs a little more post-processing
|
||||
compressionFormat, compressionLevel := archive.ExtractFormatLevel(devinfo.InitfsCompression)
|
||||
if err := generateArchive("initramfs", compressionFormat, compressionLevel, workDir, []filelist.FileLister{
|
||||
hookdirs.New("/usr/share/mkinitfs/dirs"),
|
||||
hookdirs.New("/etc/mkinitfs/dirs"),
|
||||
hookfiles.New("/usr/share/mkinitfs/files"),
|
||||
hookfiles.New("/etc/mkinitfs/files"),
|
||||
hookscripts.New("/usr/share/mkinitfs/hooks", "/hooks"),
|
||||
hookscripts.New("/etc/mkinitfs/hooks", "/hooks"),
|
||||
modules.New(strings.Fields(devinfo.ModulesInitfs), "/usr/share/mkinitfs/modules"),
|
||||
modules.New([]string{}, "/etc/mkinitfs/modules"),
|
||||
}); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("failed to generate: ", "initramfs")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
|
||||
// deviceinfo.InitfsExtraCompression needs a little more post-processing
|
||||
compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
|
||||
if err := generateArchive("initramfs-extra", compressionFormat, compressionLevel, workDir, []filelist.FileLister{
|
||||
hookfiles.New("/usr/share/mkinitfs/files-extra"),
|
||||
hookfiles.New("/etc/mkinitfs/files-extra"),
|
||||
hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
|
||||
hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
|
||||
modules.New([]string{}, "/usr/share/mkinitfs/modules-extra"),
|
||||
modules.New([]string{}, "/etc/mkinitfs/modules-extra"),
|
||||
osksdl.New(devinfo.MesaDriver),
|
||||
}); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("failed to generate: ", "initramfs-extra")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
|
||||
// Final processing of initramfs / kernel is done by boot-deploy
|
||||
if !disableBootDeploy {
|
||||
if err := bootDeploy(workDir, *outDir, devinfo.UbootBoardname); err != nil {
|
||||
log.Println(err)
|
||||
log.Println("boot-deploy failed")
|
||||
retCode = 1
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func bootDeploy(workDir, outDir, ubootBoardname string) error {
|
||||
log.Print("== Using boot-deploy to finalize/install files ==")
|
||||
defer misc.TimeFunc(time.Now(), "boot-deploy")
|
||||
|
||||
bd := bootdeploy.New(workDir, outDir, ubootBoardname)
|
||||
return bd.Run()
|
||||
}
|
||||
|
||||
func generateArchive(name string, format archive.CompressFormat, level archive.CompressLevel, path string, features []filelist.FileLister) error {
|
||||
log.Printf("== Generating %s ==\n", name)
|
||||
log.Printf("- Using compression format %s with level %q\n", format, level)
|
||||
|
||||
defer misc.TimeFunc(time.Now(), name)
|
||||
a, err := archive.New(format, level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fs := initramfs.New(features)
|
||||
if err := a.AddItems(fs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Println("- Writing and verifying archive: ", name)
|
||||
if err := a.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
122
doc/mkinitfs.1.scd
Normal file
122
doc/mkinitfs.1.scd
Normal file
@@ -0,0 +1,122 @@
|
||||
mkinitfs(1) "mkinitfs"
|
||||
|
||||
# NAME
|
||||
|
||||
mkinitfs
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
mkinitfs is a simple, generic tool for generating an initramfs, primarily
|
||||
developed for use in postmarketOS
|
||||
|
||||
# CONCEPTS
|
||||
|
||||
mkinitfs is designed to generate two archives, "initramfs" and
|
||||
"initramfs-extra", however it's possible to configure mkinitfs to run without
|
||||
generating an initramfs-extra archive. mkinitfs is primarily configured through
|
||||
the placement of files in specific directories detailed below in the
|
||||
*DIRECTORIES* section. *deviceinfo* files are also used to provide other
|
||||
configuration options to mkinitfs, these are covered under the *DEVICEINFO*
|
||||
section below.
|
||||
|
||||
mkinitfs does not provide an init script, or any boot-time logic, it's purpose
|
||||
is purely to generate the archive(s). mkinitfs does call *boot-deploy* after
|
||||
creating the archive(s), in order to install/deploy them and any other relevant
|
||||
boot-related items onto the system.
|
||||
|
||||
# DEVICEINFO
|
||||
|
||||
The canonical deviceinfo "specification" is at
|
||||
https://wiki.postmarketos.org/wiki/Deviceinfo_reference
|
||||
|
||||
mkinitfs reads deviceinfo values from */etc/deviceinfo*. The following variables
|
||||
are *required* by mkinitfs:
|
||||
|
||||
- deviceinfo_initfs_compression
|
||||
- deviceinfo_initfs_extra_compression
|
||||
- deviceinfo_mesa_driver
|
||||
- deviceinfo_modules_initfs
|
||||
- deviceinfo_uboot_boardname
|
||||
|
||||
It is a design goal to keep the number of required variables from deviceinfo to
|
||||
a bare minimum, and to require only variables that don't hold lists of things.
|
||||
|
||||
*NOTE*: When deviceinfo_initfs_extra_compression is set, make sure that the
|
||||
necessary tools to extract the configured archive format are in the initramfs
|
||||
archive.
|
||||
|
||||
# DIRECTORIES
|
||||
|
||||
The following directories are used by mkinitfs to generate the initramfs and
|
||||
initramfs-extra archives. Directories that end in *-extra* indicate directories
|
||||
that are used for constructing the initramfs-extra archive, while those without
|
||||
it are for constructing the initramfs archive.
|
||||
|
||||
Configuration under */usr/share/mkinitfs* is intended to be managed by
|
||||
distributions, while configuration under */etc/mkinitfs* is for users to
|
||||
create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, and then from */etc/mkinitfs*.
|
||||
|
||||
## /usr/share/mkinitfs/files, /etc/mkinitfs/files
|
||||
## /usr/share/mkinitfs/files-extra, /etc/mkinitfs/files-extra
|
||||
|
||||
Files with the *.files* extension are read as a list of
|
||||
files/directories. Each line is in the format:
|
||||
|
||||
```
|
||||
<source path>:<destination path>
|
||||
```
|
||||
|
||||
The source path is the location, at runtime, of the file or directory
|
||||
which will be copied to the destination path within the initramfs
|
||||
archive. Specifying a destination path, with *:<destination path>* is
|
||||
optional. If it is omitted, then the source path will be used as the
|
||||
destination path within the archive. The source and destination paths
|
||||
are delimited by a *:* (colon.) Destination path is ignored if the source
|
||||
path is a glob that returns more than 1 file. This may change in the future.
|
||||
|
||||
[[ *Line in .files*
|
||||
:< Comment
|
||||
| */usr/share/bazz*
|
||||
: File or directory */usr/share/bazz* would be added to the archive under */usr/share/bazz*
|
||||
| */usr/share/bazz:/bazz*
|
||||
: File or directory */usr/share/bazz* would be added to the archive under */bazz*
|
||||
| */root/something/\**
|
||||
: Everything under */root/something* would be added to the archive under */root/something*
|
||||
| */etc/foo/\*/bazz:/foo*
|
||||
: Anything that matches the glob will be installed under the source path in the archive. For example, */etc/foo/bar/bazz* would be installed at */etc/foo/bar/bazz* in the archive. The destination path is ignored.
|
||||
|
||||
It's possible to overwrite file/directory destinations from
|
||||
configuration in */usr/share/mkinitfs* by specifying the same source
|
||||
path(s) under the relevant directory in */etc/mkinitfs*, and changing
|
||||
the destination path.
|
||||
|
||||
## /usr/share/mkinitfs/hooks, /etc/mkinitfs/hooks
|
||||
## /usr/share/mkinitfs/hooks-extra*, /etc/mkinitfs/hooks-extra
|
||||
|
||||
Any files listed under these directories are copied as-is into the
|
||||
relevant archives. Hooks are generally script files, but how they are
|
||||
treated in the initramfs is entirely up to whatever init script is run
|
||||
there on boot.
|
||||
|
||||
Hooks are installed in the initramfs under the */hooks* directory, and
|
||||
under */hooks-extra* for the initramfs-extra.
|
||||
|
||||
## /usr/share/mkinitfs/modules, /etc/mkinitfs/modules
|
||||
## /usr/share/mkinitfs/modules-extra, /etc/mkinitfs/modules-extra
|
||||
|
||||
Files with the *.modules* extention in these directories are lists of
|
||||
kernel modules to include in the initramfs. Individual modules and
|
||||
directories can be listed in the files here. Globbing is also supported.
|
||||
|
||||
Modules are installed in the initramfs archive under the same path they
|
||||
exist on the system where mkinitfs is executed.
|
||||
|
||||
## /usr/share/mkinitfs/dirs, /etc/mkinitfs/dirs
|
||||
|
||||
Files with the *.dirs* extension in these directories are lists of
|
||||
directories to create within the initramfs. There is no *-extra* variant,
|
||||
since directories are of negligible size.
|
||||
|
||||
# AUTHORS
|
||||
|
||||
*Clayton Craft* <clayton@craftyguy.net>
|
6
go.mod
6
go.mod
@@ -1,10 +1,10 @@
|
||||
module gitlab.com/postmarketOS/postmarketos-mkinitfs
|
||||
|
||||
go 1.16
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
|
||||
github.com/klauspost/compress v1.13.3 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5
|
||||
github.com/klauspost/compress v1.15.12
|
||||
github.com/ulikunitz/xz v0.5.10
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
|
||||
)
|
||||
|
9
go.sum
9
go.sum
@@ -1,9 +1,8 @@
|
||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=
|
||||
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/klauspost/compress v1.13.3 h1:BtAvtV1+h0YwSVwWoYXMREPpYu9VzTJ9QDI1TEg/iQQ=
|
||||
github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
|
||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
|
||||
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
|
||||
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
|
||||
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
413
internal/archive/archive.go
Normal file
413
internal/archive/archive.go
Normal file
@@ -0,0 +1,413 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/cavaliercoder/go-cpio"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/ulikunitz/xz"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
)
|
||||
|
||||
type CompressFormat string
|
||||
|
||||
const (
|
||||
FormatGzip CompressFormat = "gzip"
|
||||
FormatLzma CompressFormat = "lzma"
|
||||
FormatZstd CompressFormat = "zstd"
|
||||
FormatNone CompressFormat = "none"
|
||||
)
|
||||
|
||||
type CompressLevel string
|
||||
|
||||
const (
|
||||
// Mapped to the "default" level for the given format
|
||||
LevelDefault CompressLevel = "default"
|
||||
// Maps to the fastest compression level for the given format
|
||||
LevelFast CompressLevel = "fast"
|
||||
// Maps to the best compression level for the given format
|
||||
LevelBest CompressLevel = "best"
|
||||
)
|
||||
|
||||
type Archive struct {
|
||||
cpioWriter *cpio.Writer
|
||||
buf *bytes.Buffer
|
||||
compress_format CompressFormat
|
||||
compress_level CompressLevel
|
||||
items archiveItems
|
||||
}
|
||||
|
||||
func New(format CompressFormat, level CompressLevel) (*Archive, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
archive := &Archive{
|
||||
cpioWriter: cpio.NewWriter(buf),
|
||||
buf: buf,
|
||||
compress_format: format,
|
||||
compress_level: level,
|
||||
}
|
||||
|
||||
return archive, nil
|
||||
}
|
||||
|
||||
type archiveItem struct {
|
||||
header *cpio.Header
|
||||
sourcePath string
|
||||
}
|
||||
|
||||
type archiveItems struct {
|
||||
items []archiveItem
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// ExtractFormatLevel parses the given string in the format format[:level],
|
||||
// where :level is one of CompressLevel consts. If level is omitted from the
|
||||
// string, or if it can't be parsed, the level is set to the default level for
|
||||
// the given format. If format is unknown, gzip is selected. This function is
|
||||
// designed to always return something usable within this package.
|
||||
func ExtractFormatLevel(s string) (format CompressFormat, level CompressLevel) {
|
||||
|
||||
f, l, found := strings.Cut(s, ":")
|
||||
if !found {
|
||||
l = "default"
|
||||
}
|
||||
|
||||
level = CompressLevel(strings.ToLower(l))
|
||||
format = CompressFormat(strings.ToLower(f))
|
||||
switch level {
|
||||
|
||||
}
|
||||
switch level {
|
||||
case LevelBest:
|
||||
case LevelDefault:
|
||||
case LevelFast:
|
||||
default:
|
||||
log.Print("Unknown or no compression level set, using default")
|
||||
level = LevelDefault
|
||||
}
|
||||
|
||||
switch format {
|
||||
case FormatGzip:
|
||||
case FormatLzma:
|
||||
log.Println("Format lzma doesn't support a compression level, using default settings")
|
||||
level = LevelDefault
|
||||
case FormatNone:
|
||||
case FormatZstd:
|
||||
default:
|
||||
log.Print("Unknown or no compression format set, using gzip")
|
||||
format = FormatGzip
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Adds the given item to the archiveItems, only if it doesn't already exist in
|
||||
// the list. The items are kept sorted in ascending order.
|
||||
func (a *archiveItems) add(item archiveItem) {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
|
||||
if len(a.items) < 1 {
|
||||
// empty list
|
||||
a.items = append(a.items, item)
|
||||
return
|
||||
}
|
||||
|
||||
// find existing item, or index of where new item should go
|
||||
i := sort.Search(len(a.items), func(i int) bool {
|
||||
return strings.Compare(item.header.Name, a.items[i].header.Name) <= 0
|
||||
})
|
||||
|
||||
if i >= len(a.items) {
|
||||
// doesn't exist in list, but would be at the very end
|
||||
a.items = append(a.items, item)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.Compare(a.items[i].header.Name, item.header.Name) == 0 {
|
||||
// already in list
|
||||
return
|
||||
}
|
||||
|
||||
// grow list by 1, shift right at index, and insert new string at index
|
||||
a.items = append(a.items, archiveItem{})
|
||||
copy(a.items[i+1:], a.items[i:])
|
||||
a.items[i] = item
|
||||
}
|
||||
|
||||
// iterate through items and send each one over the returned channel
|
||||
func (a *archiveItems) IterItems() <-chan archiveItem {
|
||||
ch := make(chan archiveItem)
|
||||
go func() {
|
||||
a.RLock()
|
||||
defer a.RUnlock()
|
||||
|
||||
for _, item := range a.items {
|
||||
ch <- item
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
func (archive *Archive) Write(path string, mode os.FileMode) error {
|
||||
if err := archive.writeCpio(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := archive.cpioWriter.Close(); err != nil {
|
||||
return fmt.Errorf("archive.Write: error closing archive: %w", err)
|
||||
}
|
||||
|
||||
// Write archive to path
|
||||
if err := archive.writeCompressed(path, mode); err != nil {
|
||||
return fmt.Errorf("unable to write archive to location %q: %w", path, err)
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return fmt.Errorf("unable to chmod %q to %s: %w", path, mode, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Adds the given items in the map to the archive. The map format is {source path:dest path}.
|
||||
// Internally this just calls AddItem on each key,value pair in the map.
|
||||
func (archive *Archive) AddItems(f filelist.FileLister) error {
|
||||
list, err := f.List()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range list.IterItems() {
|
||||
if err := archive.AddItem(i.Source, i.Dest); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Adds the given file or directory at "source" to the archive at "dest"
|
||||
func (archive *Archive) AddItem(source string, dest string) error {
|
||||
|
||||
sourceStat, err := os.Lstat(source)
|
||||
if err != nil {
|
||||
e, ok := err.(*os.PathError)
|
||||
if e.Err == syscall.ENOENT && ok {
|
||||
// doesn't exist in current filesystem, assume it's a new directory
|
||||
return archive.addDir(dest)
|
||||
}
|
||||
return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
|
||||
}
|
||||
|
||||
if sourceStat.Mode()&os.ModeDir != 0 {
|
||||
return archive.addDir(dest)
|
||||
}
|
||||
|
||||
return archive.addFile(source, dest)
|
||||
}
|
||||
|
||||
func (archive *Archive) addFile(source string, dest string) error {
|
||||
if err := archive.addDir(filepath.Dir(dest)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sourceStat, err := os.Lstat(source)
|
||||
if err != nil {
|
||||
log.Print("addFile: failed to stat file: ", source)
|
||||
return err
|
||||
}
|
||||
|
||||
// Symlink: write symlink to archive then set 'file' to link target
|
||||
if sourceStat.Mode()&os.ModeSymlink != 0 {
|
||||
// log.Printf("File %q is a symlink", file)
|
||||
target, err := os.Readlink(source)
|
||||
if err != nil {
|
||||
log.Print("addFile: failed to get symlink target: ", source)
|
||||
return err
|
||||
}
|
||||
|
||||
destFilename := strings.TrimPrefix(dest, "/")
|
||||
|
||||
archive.items.add(archiveItem{
|
||||
sourcePath: source,
|
||||
header: &cpio.Header{
|
||||
Name: destFilename,
|
||||
Linkname: target,
|
||||
Mode: 0644 | cpio.ModeSymlink,
|
||||
Size: int64(len(target)),
|
||||
// Checksum: 1,
|
||||
},
|
||||
})
|
||||
|
||||
if filepath.Dir(target) == "." {
|
||||
target = filepath.Join(filepath.Dir(source), target)
|
||||
}
|
||||
// make sure target is an absolute path
|
||||
if !filepath.IsAbs(target) {
|
||||
target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = archive.addFile(target, target)
|
||||
return err
|
||||
}
|
||||
|
||||
destFilename := strings.TrimPrefix(dest, "/")
|
||||
|
||||
archive.items.add(archiveItem{
|
||||
sourcePath: source,
|
||||
header: &cpio.Header{
|
||||
Name: destFilename,
|
||||
Mode: cpio.FileMode(sourceStat.Mode().Perm()),
|
||||
Size: sourceStat.Size(),
|
||||
// Checksum: 1,
|
||||
},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err error) {
|
||||
|
||||
var compressor io.WriteCloser
|
||||
defer func() {
|
||||
e := compressor.Close()
|
||||
if e != nil && err == nil {
|
||||
err = e
|
||||
}
|
||||
}()
|
||||
|
||||
fd, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Note: fd.Close omitted since it'll be closed in "compressor"
|
||||
|
||||
switch archive.compress_format {
|
||||
case FormatGzip:
|
||||
level := gzip.DefaultCompression
|
||||
switch archive.compress_level {
|
||||
case LevelBest:
|
||||
level = gzip.BestCompression
|
||||
case LevelFast:
|
||||
level = gzip.BestSpeed
|
||||
}
|
||||
compressor, err = gzip.NewWriterLevel(fd, level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case FormatLzma:
|
||||
compressor, err = xz.NewWriter(fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case FormatNone:
|
||||
compressor = fd
|
||||
case FormatZstd:
|
||||
level := zstd.SpeedDefault
|
||||
switch archive.compress_level {
|
||||
case LevelBest:
|
||||
level = zstd.SpeedBestCompression
|
||||
case LevelFast:
|
||||
level = zstd.SpeedFastest
|
||||
}
|
||||
compressor, err = zstd.NewWriter(fd, zstd.WithEncoderLevel(level))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
log.Print("Unknown or no compression format set, using gzip")
|
||||
compressor = gzip.NewWriter(fd)
|
||||
}
|
||||
|
||||
if _, err = io.Copy(compressor, archive.buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// call fsync just to be sure
|
||||
if err := fd.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) writeCpio() error {
|
||||
// having a transient function for actually adding files to the archive
|
||||
// allows the deferred fd.close to run after every copy and prevent having
|
||||
// tons of open file handles until the copying is all done
|
||||
copyToArchive := func(source string, header *cpio.Header) error {
|
||||
|
||||
if err := archive.cpioWriter.WriteHeader(header); err != nil {
|
||||
return fmt.Errorf("archive.writeCpio: unable to write header: %w", err)
|
||||
}
|
||||
|
||||
// don't copy actual dirs into the archive, writing the header is enough
|
||||
if !header.Mode.IsDir() {
|
||||
if header.Mode.IsRegular() {
|
||||
fd, err := os.Open(source)
|
||||
if err != nil {
|
||||
return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
|
||||
}
|
||||
defer fd.Close()
|
||||
if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
|
||||
return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
|
||||
}
|
||||
} else if header.Linkname != "" {
|
||||
// the contents of a symlink is just need the link name
|
||||
if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
|
||||
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range archive.items.IterItems() {
|
||||
if err := copyToArchive(i.sourcePath, i.header); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) addDir(dir string) error {
|
||||
if dir == "/" {
|
||||
dir = "."
|
||||
}
|
||||
|
||||
subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
|
||||
for i, subdir := range subdirs {
|
||||
path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
|
||||
archive.items.add(archiveItem{
|
||||
sourcePath: path,
|
||||
header: &cpio.Header{
|
||||
Name: path,
|
||||
Mode: cpio.ModeDir | 0755,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
272
internal/archive/archive_test.go
Normal file
272
internal/archive/archive_test.go
Normal file
@@ -0,0 +1,272 @@
|
||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/cavaliercoder/go-cpio"
|
||||
)
|
||||
|
||||
func TestArchiveItemsAdd(t *testing.T) {
|
||||
subtests := []struct {
|
||||
name string
|
||||
inItems []archiveItem
|
||||
inItem archiveItem
|
||||
expected []archiveItem
|
||||
}{
|
||||
{
|
||||
name: "empty list",
|
||||
inItems: []archiveItem{},
|
||||
inItem: archiveItem{
|
||||
sourcePath: "/foo/bar",
|
||||
header: &cpio.Header{Name: "/foo/bar"},
|
||||
},
|
||||
expected: []archiveItem{
|
||||
{
|
||||
sourcePath: "/foo/bar",
|
||||
header: &cpio.Header{Name: "/foo/bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "already exists",
|
||||
inItems: []archiveItem{
|
||||
{
|
||||
sourcePath: "/bazz/bar",
|
||||
header: &cpio.Header{Name: "/bazz/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar",
|
||||
header: &cpio.Header{Name: "/foo/bar"},
|
||||
},
|
||||
},
|
||||
inItem: archiveItem{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
expected: []archiveItem{
|
||||
{
|
||||
sourcePath: "/bazz/bar",
|
||||
header: &cpio.Header{Name: "/bazz/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar",
|
||||
header: &cpio.Header{Name: "/foo/bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "add new",
|
||||
inItems: []archiveItem{
|
||||
{
|
||||
sourcePath: "/bazz/bar",
|
||||
header: &cpio.Header{Name: "/bazz/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar",
|
||||
header: &cpio.Header{Name: "/foo/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar1",
|
||||
header: &cpio.Header{Name: "/foo/bar1"},
|
||||
},
|
||||
},
|
||||
inItem: archiveItem{
|
||||
sourcePath: "/foo/bar0",
|
||||
header: &cpio.Header{Name: "/foo/bar0"},
|
||||
},
|
||||
expected: []archiveItem{
|
||||
{
|
||||
sourcePath: "/bazz/bar",
|
||||
header: &cpio.Header{Name: "/bazz/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar",
|
||||
header: &cpio.Header{Name: "/foo/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar0",
|
||||
header: &cpio.Header{Name: "/foo/bar0"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar1",
|
||||
header: &cpio.Header{Name: "/foo/bar1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "add new at beginning",
|
||||
inItems: []archiveItem{
|
||||
{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar",
|
||||
header: &cpio.Header{Name: "/foo/bar"},
|
||||
},
|
||||
},
|
||||
inItem: archiveItem{
|
||||
sourcePath: "/bazz/bar",
|
||||
header: &cpio.Header{Name: "/bazz/bar"},
|
||||
},
|
||||
expected: []archiveItem{
|
||||
{
|
||||
sourcePath: "/bazz/bar",
|
||||
header: &cpio.Header{Name: "/bazz/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo/bar",
|
||||
header: &cpio.Header{Name: "/foo/bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "add new at end",
|
||||
inItems: []archiveItem{
|
||||
{
|
||||
sourcePath: "/bazz/bar",
|
||||
header: &cpio.Header{Name: "/bazz/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
},
|
||||
inItem: archiveItem{
|
||||
sourcePath: "/zzz/bazz",
|
||||
header: &cpio.Header{Name: "/zzz/bazz"},
|
||||
},
|
||||
expected: []archiveItem{
|
||||
{
|
||||
sourcePath: "/bazz/bar",
|
||||
header: &cpio.Header{Name: "/bazz/bar"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/foo",
|
||||
header: &cpio.Header{Name: "/foo"},
|
||||
},
|
||||
{
|
||||
sourcePath: "/zzz/bazz",
|
||||
header: &cpio.Header{Name: "/zzz/bazz"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, st := range subtests {
|
||||
t.Run(st.name, func(t *testing.T) {
|
||||
a := archiveItems{items: st.inItems}
|
||||
a.add(st.inItem)
|
||||
if !reflect.DeepEqual(st.expected, a.items) {
|
||||
t.Fatal("expected:", st.expected, " got: ", a.items)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractFormatLevel(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
in string
|
||||
expectedFormat CompressFormat
|
||||
expectedLevel CompressLevel
|
||||
}{
|
||||
{
|
||||
name: "gzip, default level",
|
||||
in: "gzip:default",
|
||||
expectedFormat: FormatGzip,
|
||||
expectedLevel: LevelDefault,
|
||||
},
|
||||
{
|
||||
name: "unknown format, level 12",
|
||||
in: "pear:12",
|
||||
expectedFormat: FormatGzip,
|
||||
expectedLevel: LevelDefault,
|
||||
},
|
||||
{
|
||||
name: "zstd, level not given",
|
||||
in: "zstd",
|
||||
expectedFormat: FormatZstd,
|
||||
expectedLevel: LevelDefault,
|
||||
},
|
||||
{
|
||||
name: "zstd, invalid level 'fast:'",
|
||||
in: "zstd:fast:",
|
||||
expectedFormat: FormatZstd,
|
||||
expectedLevel: LevelDefault,
|
||||
},
|
||||
{
|
||||
name: "zstd, best",
|
||||
in: "zstd:best",
|
||||
expectedFormat: FormatZstd,
|
||||
expectedLevel: LevelBest,
|
||||
},
|
||||
{
|
||||
name: "zstd, level empty :",
|
||||
in: "zstd:",
|
||||
expectedFormat: FormatZstd,
|
||||
expectedLevel: LevelDefault,
|
||||
},
|
||||
{
|
||||
name: "gzip, best",
|
||||
in: "gzip:best",
|
||||
expectedFormat: FormatGzip,
|
||||
expectedLevel: LevelBest,
|
||||
},
|
||||
{
|
||||
name: "<empty>, <empty>",
|
||||
in: "",
|
||||
expectedFormat: FormatGzip,
|
||||
expectedLevel: LevelDefault,
|
||||
},
|
||||
{
|
||||
name: "lzma, fast",
|
||||
in: "lzma:fast",
|
||||
expectedFormat: FormatLzma,
|
||||
expectedLevel: LevelDefault,
|
||||
},
|
||||
{
|
||||
name: "none",
|
||||
in: "none",
|
||||
expectedFormat: FormatNone,
|
||||
expectedLevel: LevelDefault,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
format, level := ExtractFormatLevel(test.in)
|
||||
if format != test.expectedFormat {
|
||||
t.Fatal("format expected: ", test.expectedFormat, " got: ", format)
|
||||
}
|
||||
if level != test.expectedLevel {
|
||||
t.Fatal("level expected: ", test.expectedLevel, " got: ", level)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
153
internal/bootdeploy/bootdeploy.go
Normal file
153
internal/bootdeploy/bootdeploy.go
Normal file
@@ -0,0 +1,153 @@
|
||||
package bootdeploy
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BootDeploy wraps the parameters needed to run the external "boot-deploy"
// tool against a staging (input) directory and an output directory.
type BootDeploy struct {
	inDir          string // directory holding the initramfs to deploy
	outDir         string // directory where boot-deploy writes results
	ubootBoardname string // optional u-boot board name; may be empty
}

// New returns a new BootDeploy, which then runs:
//
//	boot-deploy -d indir -o outDir
//
// ubootBoardname is used for copying in some u-boot files prior to running
// boot-deploy. This is optional, passing an empty string is ok if this is not
// needed.
func New(inDir, outDir, ubootBoardname string) *BootDeploy {
	return &BootDeploy{
		inDir:          inDir,
		outDir:         outDir,
		ubootBoardname: ubootBoardname,
	}
}
|
||||
|
||||
func (b *BootDeploy) Run() error {
|
||||
|
||||
if err := copyUbootFiles(b.inDir, b.ubootBoardname); errors.Is(err, os.ErrNotExist) {
|
||||
log.Println("u-boot files copying skipped: ", err)
|
||||
} else {
|
||||
if err != nil {
|
||||
log.Fatal("copyUbootFiles: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
return bootDeploy(b.inDir, b.outDir)
|
||||
}
|
||||
|
||||
func bootDeploy(workDir string, outDir string) error {
|
||||
// boot-deploy expects the kernel to be in the same dir as initramfs.
|
||||
// Assume that the kernel is in the output dir...
|
||||
kernels, _ := filepath.Glob(filepath.Join(outDir, "vmlinuz*"))
|
||||
if len(kernels) == 0 {
|
||||
return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
|
||||
}
|
||||
|
||||
// Pick a kernel that does not have suffixes added by boot-deploy
|
||||
var kernFile string
|
||||
for _, f := range kernels {
|
||||
if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
|
||||
continue
|
||||
}
|
||||
kernFile = f
|
||||
break
|
||||
}
|
||||
|
||||
kernFd, err := os.Open(kernFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer kernFd.Close()
|
||||
|
||||
kernFilename := path.Base(kernFile)
|
||||
kernFileCopy, err := os.Create(filepath.Join(workDir, kernFilename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := kernFileCopy.Close(); err != nil {
|
||||
return fmt.Errorf("error closing %s: %w", kernFilename, err)
|
||||
}
|
||||
|
||||
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
|
||||
cmd := exec.Command("boot-deploy",
|
||||
"-i", "initramfs",
|
||||
"-k", kernFilename,
|
||||
"-d", workDir,
|
||||
"-o", outDir,
|
||||
"initramfs-extra")
|
||||
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy copies the file at srcFile path to a new file at dstFile path
|
||||
func copy(srcFile, dstFile string) error {
|
||||
out, err := os.Create(dstFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
errClose := out.Close()
|
||||
if err == nil {
|
||||
err = errClose
|
||||
}
|
||||
}()
|
||||
|
||||
in, err := os.Open(srcFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyUbootFiles uses deviceinfo_uboot_boardname to copy u-boot files required
|
||||
// for running boot-deploy
|
||||
func copyUbootFiles(path, ubootBoardname string) error {
|
||||
if ubootBoardname == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
srcDir := filepath.Join("/usr/share/u-boot", ubootBoardname)
|
||||
entries, err := os.ReadDir(srcDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
sourcePath := filepath.Join(srcDir, entry.Name())
|
||||
destPath := filepath.Join(path, entry.Name())
|
||||
|
||||
if err := copy(sourcePath, destPath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
65
internal/filelist/filelist.go
Normal file
65
internal/filelist/filelist.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package filelist
|
||||
|
||||
import "sync"
|
||||
|
||||
// FileLister is implemented by anything that can produce a FileList.
type FileLister interface {
	List() (*FileList, error)
}

// File is a single source-path → destination-path pairing.
type File struct {
	Source string
	Dest   string
}

// FileList is a concurrency-safe mapping of source paths to destination
// paths.
type FileList struct {
	m map[string]string
	sync.RWMutex
}

// NewFileList returns an empty, ready-to-use FileList.
func NewFileList() *FileList {
	return &FileList{m: make(map[string]string)}
}

// Add records that src should be installed at dest, replacing any previous
// destination recorded for src.
func (f *FileList) Add(src string, dest string) {
	f.Lock()
	defer f.Unlock()
	f.m[src] = dest
}

// Get returns the destination for src and whether src is present.
func (f *FileList) Get(src string) (string, bool) {
	f.RLock()
	defer f.RUnlock()
	dest, found := f.m[src]
	return dest, found
}

// Import copies in the contents of src. If a source path already exists when
// importing, then the destination path is updated with the new value.
func (f *FileList) Import(src *FileList) {
	for item := range src.IterItems() {
		f.Add(item.Source, item.Dest)
	}
}

// IterItems iterates through the list and sends each entry as a File over
// the returned channel. The caller must drain the channel, otherwise the
// sending goroutine (and the read lock it holds) is never released.
func (f *FileList) IterItems() <-chan File {
	ch := make(chan File)
	go func() {
		defer close(ch)
		f.RLock()
		defer f.RUnlock()

		for src, dest := range f.m {
			ch <- File{Source: src, Dest: dest}
		}
	}()
	return ch
}
|
51
internal/filelist/hookdirs/hookdirs.go
Normal file
51
internal/filelist/hookdirs/hookdirs.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package hookdirs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
)
|
||||
|
||||
// HookDirs lists directories that should be created in the initramfs, as
// named by files found in a configuration directory.
type HookDirs struct {
	path string // directory containing the hook-dir list files
}

// New returns a new HookDirs that will use the given path to provide a list
// of directories to use.
func New(path string) *HookDirs {
	return &HookDirs{
		path: path,
	}
}
||||
|
||||
func (h *HookDirs) List() (*filelist.FileList, error) {
|
||||
log.Printf("- Searching for directories specified in %s", h.path)
|
||||
|
||||
files := filelist.NewFileList()
|
||||
fileInfo, err := os.ReadDir(h.path)
|
||||
if err != nil {
|
||||
log.Println("-- Unable to find dir, skipping...")
|
||||
return files, nil
|
||||
}
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(h.path, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getHookDirs: unable to open hook file: %w", err)
|
||||
|
||||
}
|
||||
defer f.Close()
|
||||
log.Printf("-- Creating directories from: %s\n", path)
|
||||
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
dir := s.Text()
|
||||
files.Add(dir, dir)
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
|
83
internal/filelist/hookfiles/hookfiles.go
Normal file
83
internal/filelist/hookfiles/hookfiles.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package hookfiles
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
||||
)
|
||||
|
||||
// HookFiles lists extra files (and optional destinations) to include in the
// initramfs, as named by hook files found in a configuration directory.
type HookFiles struct {
	filePath string // directory containing the hook list files
}

// New returns a new HookFiles that will use the given path to provide a list
// of files + any binary dependencies they might have.
func New(filePath string) *HookFiles {
	return &HookFiles{
		filePath: filePath,
	}
}
|
||||
|
||||
func (h *HookFiles) List() (*filelist.FileList, error) {
|
||||
log.Printf("- Searching for file lists from %s", h.filePath)
|
||||
|
||||
files := filelist.NewFileList()
|
||||
fileInfo, err := os.ReadDir(h.filePath)
|
||||
if err != nil {
|
||||
log.Println("-- Unable to find dir, skipping...")
|
||||
return files, nil
|
||||
}
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(h.filePath, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getHookFiles: unable to open hook file: %w", err)
|
||||
|
||||
}
|
||||
defer f.Close()
|
||||
log.Printf("-- Including files from: %s\n", path)
|
||||
|
||||
if list, err := slurpFiles(f); err != nil {
|
||||
return nil, fmt.Errorf("hookfiles: unable to process hook file %q: %w", path, err)
|
||||
} else {
|
||||
files.Import(list)
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func slurpFiles(fd io.Reader) (*filelist.FileList, error) {
|
||||
files := filelist.NewFileList()
|
||||
|
||||
s := bufio.NewScanner(fd)
|
||||
for s.Scan() {
|
||||
src, dest, has_dest := strings.Cut(s.Text(), ":")
|
||||
|
||||
fFiles, err := misc.GetFiles([]string{src}, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to add %q: %w", src, err)
|
||||
}
|
||||
// loop over all returned files from GetFile
|
||||
for _, file := range fFiles {
|
||||
if !has_dest {
|
||||
files.Add(file, file)
|
||||
} else if len(fFiles) > 1 {
|
||||
// Don't support specifying dest if src was a glob
|
||||
// NOTE: this could support this later...
|
||||
files.Add(file, file)
|
||||
} else {
|
||||
// dest path specified, and only 1 file
|
||||
files.Add(file, dest)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return files, s.Err()
|
||||
}
|
42
internal/filelist/hookscripts/hookscripts.go
Normal file
42
internal/filelist/hookscripts/hookscripts.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package hookscripts
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
)
|
||||
|
||||
// HookScripts lists hook scripts found in a directory; each script is
// destined for a fixed directory inside the initramfs.
type HookScripts struct {
	destPath   string // destination directory inside the initramfs
	scriptsDir string // directory to scan for scripts
}

// New returns a new HookScripts that will use the given path to provide a list
// of script files. The destination for each script is set to destPath, using
// the original file name.
func New(scriptsDir string, destPath string) *HookScripts {
	return &HookScripts{
		destPath:   destPath,
		scriptsDir: scriptsDir,
	}
}
|
||||
|
||||
func (h *HookScripts) List() (*filelist.FileList, error) {
|
||||
log.Printf("- Searching for hook scripts from %s", h.scriptsDir)
|
||||
|
||||
files := filelist.NewFileList()
|
||||
|
||||
fileInfo, err := os.ReadDir(h.scriptsDir)
|
||||
if err != nil {
|
||||
log.Println("-- Unable to find dir, skipping...")
|
||||
return files, nil
|
||||
}
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(h.scriptsDir, file.Name())
|
||||
log.Printf("-- Including script: %s\n", path)
|
||||
files.Add(path, filepath.Join(h.destPath, file.Name()))
|
||||
}
|
||||
return files, nil
|
||||
}
|
34
internal/filelist/initramfs/initramfs.go
Normal file
34
internal/filelist/initramfs/initramfs.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package initramfs
|
||||
|
||||
import (
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
)
|
||||
|
||||
// Initramfs allows building arbitrarily complex lists of features, by slurping
// up types that implement FileLister (which includes this type! yippee) and
// combining the output from them.
type Initramfs struct {
	// features are the FileListers whose outputs List merges together
	features []filelist.FileLister
}

// New returns a new Initramfs that generates a list of files based on the
// given list of FileListers.
func New(features []filelist.FileLister) *Initramfs {
	return &Initramfs{
		features: features,
	}
}
|
||||
|
||||
func (i *Initramfs) List() (*filelist.FileList, error) {
|
||||
files := filelist.NewFileList()
|
||||
|
||||
for _, f := range i.features {
|
||||
list, err := f.List()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files.Import(list)
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
220
internal/filelist/modules/modules.go
Normal file
220
internal/filelist/modules/modules.go
Normal file
@@ -0,0 +1,220 @@
|
||||
package modules
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
)
|
||||
|
||||
// Modules produces the list of kernel module files (plus modprobe metadata)
// to include in the initramfs.
type Modules struct {
	modulesListPath string   // directory containing module list files
	modulesList     []string // explicit module names (e.g. from deviceinfo)
}

// New returns a new Modules that will include the explicitly given module
// names (modulesList) plus any modules named by list files found under
// modulesListPath.
func New(modulesList []string, modulesListPath string) *Modules {
	return &Modules{
		modulesList:     modulesList,
		modulesListPath: modulesListPath,
	}
}
|
||||
|
||||
func (m *Modules) List() (*filelist.FileList, error) {
|
||||
kernVer, err := osutil.GetKernelVersion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
files := filelist.NewFileList()
|
||||
|
||||
modDir := filepath.Join("/lib/modules", kernVer)
|
||||
if exists, err := misc.Exists(modDir); !exists {
|
||||
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
|
||||
log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
|
||||
return files, nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", modDir, err)
|
||||
}
|
||||
|
||||
// modules.* required by modprobe
|
||||
modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
|
||||
for _, file := range modprobeFiles {
|
||||
files.Add(file, file)
|
||||
}
|
||||
|
||||
// slurp up given list of modules
|
||||
if len(m.modulesList) > 0 {
|
||||
log.Printf("-- Including kernel modules from deviceinfo")
|
||||
for _, module := range m.modulesList {
|
||||
if modFilelist, err := getModule(module, modDir); err != nil {
|
||||
return nil, fmt.Errorf("unable to get modules from deviceinfo: %w", err)
|
||||
} else {
|
||||
for _, file := range modFilelist {
|
||||
files.Add(file, file)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// slurp up modules from lists in modulesListPath
|
||||
log.Printf("- Searching for kernel modules from %s", m.modulesListPath)
|
||||
fileInfo, err := os.ReadDir(m.modulesListPath)
|
||||
if err != nil {
|
||||
log.Println("-- Unable to find dir, skipping...")
|
||||
return files, nil
|
||||
}
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(m.modulesListPath, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to open module list file %q: %w", path, err)
|
||||
}
|
||||
defer f.Close()
|
||||
log.Printf("-- Including modules from: %s\n", path)
|
||||
|
||||
if list, err := slurpModules(f, modDir); err != nil {
|
||||
return nil, fmt.Errorf("unable to process module list file %q: %w", path, err)
|
||||
} else {
|
||||
files.Import(list)
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// slurpModules reads one module entry per line from fd and resolves each one
// against modDir. A line with only a directory component (trailing "/") is
// treated as a glob under modDir whose module files are all included; a line
// with only a file component is treated as a module name and resolved along
// with its dependencies. Lines with both components are logged and skipped.
func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
	files := filelist.NewFileList()
	s := bufio.NewScanner(fd)
	for s.Scan() {
		line := s.Text()
		dir, file := filepath.Split(line)
		if file == "" {
			// item is a directory
			dir = filepath.Join(modDir, dir)
			dirs, _ := filepath.Glob(dir)
			for _, d := range dirs {
				if modFilelist, err := getModulesInDir(d); err != nil {
					return nil, fmt.Errorf("unable to get modules dir %q: %w", d, err)
				} else {
					for _, file := range modFilelist {
						files.Add(file, file)
					}
				}
			}
		} else if dir == "" {
			// item is a module name
			if modFilelist, err := getModule(s.Text(), modDir); err != nil {
				return nil, fmt.Errorf("unable to get module file %q: %w", s.Text(), err)
			} else {
				for _, file := range modFilelist {
					files.Add(file, file)
				}
			}
		} else {
			// line has both a directory and a file component; unsupported
			log.Printf("Unknown module entry: %q", line)
		}
	}

	return files, s.Err()
}
|
||||
|
||||
func getModulesInDir(modPath string) (files []string, err error) {
|
||||
err = filepath.Walk(modPath, func(path string, _ os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
// Unable to walk path
|
||||
return err
|
||||
}
|
||||
if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
|
||||
return nil
|
||||
}
|
||||
files = append(files, path)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
// file and all of its dependencies.
// Note: it's not necessarily fatal if the module is not found, since it may
// have been built into the kernel
func getModule(modName string, modDir string) (files []string, err error) {

	modDep := filepath.Join(modDir, "modules.dep")
	// NOTE(review): if misc.Exists returns (false, err), the !exists branch
	// wins and the underlying error is masked — confirm Exists' contract
	if exists, err := misc.Exists(modDep); !exists {
		return nil, fmt.Errorf("kernel module.dep not found: %s", modDir)
	} else if err != nil {
		return nil, fmt.Errorf("received unexpected error when getting module.dep status: %w", err)
	}

	fd, err := os.Open(modDep)
	if err != nil {
		return nil, fmt.Errorf("unable to open modules.dep: %w", err)
	}
	defer fd.Close()

	// deps holds the module's own path plus dependency paths, relative to modDir
	deps, err := getModuleDeps(modName, fd)
	if err != nil {
		return nil, err
	}

	for _, dep := range deps {
		p := filepath.Join(modDir, dep)
		if exists, err := misc.Exists(p); !exists {
			return nil, fmt.Errorf("tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p)
		} else if err != nil {
			return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", p, err)
		}

		files = append(files, p)
	}

	return
}
|
||||
|
||||
// Get the canonicalized name for the module as represented in the given modules.dep io.reader
|
||||
func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
|
||||
var deps []string
|
||||
|
||||
// split the module name on - and/or _, build a regex for matching
|
||||
splitRe := regexp.MustCompile("[-_]+")
|
||||
modNameReStr := splitRe.ReplaceAllString(modName, "[-_]+")
|
||||
re := regexp.MustCompile("^" + modNameReStr + "$")
|
||||
|
||||
s := bufio.NewScanner(modulesDep)
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
if len(fields) == 0 {
|
||||
continue
|
||||
}
|
||||
fields[0] = strings.TrimSuffix(fields[0], ":")
|
||||
|
||||
found := re.FindAll([]byte(filepath.Base(stripExts(fields[0]))), -1)
|
||||
if len(found) > 0 {
|
||||
deps = append(deps, fields...)
|
||||
break
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
log.Print("Unable to get module + dependencies: ", modName)
|
||||
return deps, err
|
||||
}
|
||||
|
||||
return deps, nil
|
||||
}
|
||||
|
||||
func stripExts(file string) string {
|
||||
return strings.Split(file, ".")[0]
|
||||
}
|
@@ -1,7 +1,7 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// Copyright 2023 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package main
|
||||
package modules
|
||||
|
||||
import (
|
||||
"strings"
|
||||
@@ -27,18 +27,6 @@ func TestStripExts(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func stringSlicesEqual(a []string, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
for i, v := range a {
|
||||
if v != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var testModuleDep string = `
|
||||
kernel/sound/soc/codecs/snd-soc-msm8916-digital.ko:
|
||||
kernel/net/sched/act_ipt.ko.xz: kernel/net/netfilter/x_tables.ko.xz
|
||||
@@ -80,3 +68,15 @@ func TestGetModuleDeps(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// stringSlicesEqual reports whether a and b have the same length and the
// same elements in the same order.
func stringSlicesEqual(a []string, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
|
158
internal/filelist/osksdl/osksdl.go
Normal file
158
internal/filelist/osksdl/osksdl.go
Normal file
@@ -0,0 +1,158 @@
|
||||
package osksdl
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
|
||||
)
|
||||
|
||||
// OskSdl collects the files needed to run osk-sdl (the on-screen keyboard
// used to unlock full-disk encryption at boot) from the initramfs.
type OskSdl struct {
	mesaDriver string // mesa driver name to include when listing files
}

// New returns a new OskSdl that will include files for the given mesa
// driver when listing.
func New(mesaDriverName string) *OskSdl {
	return &OskSdl{
		mesaDriver: mesaDriverName,
	}
}
|
||||
|
||||
// Get a list of files and their dependencies related to supporting rootfs full
|
||||
// disk (d)encryption
|
||||
func (s *OskSdl) List() (*filelist.FileList, error) {
|
||||
files := filelist.NewFileList()
|
||||
|
||||
if exists, err := misc.Exists("/usr/bin/osk-sdl"); !exists {
|
||||
return files, nil
|
||||
} else if err != nil {
|
||||
return files, fmt.Errorf("received unexpected error when getting status for %q: %w", "/usr/bin/osk-sdl", err)
|
||||
}
|
||||
|
||||
log.Println("- Including osk-sdl support")
|
||||
|
||||
confFiles := []string{
|
||||
"/etc/osk.conf",
|
||||
"/etc/ts.conf",
|
||||
"/etc/pointercal",
|
||||
"/etc/fb.modes",
|
||||
"/etc/directfbrc",
|
||||
}
|
||||
confFileList, err := misc.GetFiles(confFiles, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
|
||||
}
|
||||
for _, file := range confFileList {
|
||||
files.Add(file, file)
|
||||
}
|
||||
|
||||
// osk-sdl
|
||||
oskFiles := []string{
|
||||
"/usr/bin/osk-sdl",
|
||||
"/sbin/cryptsetup",
|
||||
"/usr/lib/libGL.so.1",
|
||||
}
|
||||
if oskFileList, err := misc.GetFiles(oskFiles, true); err != nil {
|
||||
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
|
||||
} else {
|
||||
for _, file := range oskFileList {
|
||||
files.Add(file, file)
|
||||
}
|
||||
}
|
||||
|
||||
fontFile, err := getOskConfFontPath("/etc/osk.conf")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getFdeFiles: failed to add file %q: %w", fontFile, err)
|
||||
}
|
||||
files.Add(fontFile, fontFile)
|
||||
|
||||
// Directfb
|
||||
dfbFiles := []string{}
|
||||
err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
|
||||
if filepath.Ext(path) == ".so" {
|
||||
dfbFiles = append(dfbFiles, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getFdeFiles: failed to add file %w", err)
|
||||
}
|
||||
if dfbFileList, err := misc.GetFiles(dfbFiles, true); err != nil {
|
||||
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
|
||||
} else {
|
||||
for _, file := range dfbFileList {
|
||||
files.Add(file, file)
|
||||
}
|
||||
}
|
||||
|
||||
// tslib
|
||||
tslibFiles := []string{}
|
||||
err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
|
||||
if filepath.Ext(path) == ".so" {
|
||||
tslibFiles = append(tslibFiles, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getFdeFiles: failed to add file: %w", err)
|
||||
}
|
||||
libts, _ := filepath.Glob("/usr/lib/libts*")
|
||||
tslibFiles = append(tslibFiles, libts...)
|
||||
if tslibFileList, err := misc.GetFiles(tslibFiles, true); err != nil {
|
||||
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
|
||||
} else {
|
||||
for _, file := range tslibFileList {
|
||||
files.Add(file, file)
|
||||
}
|
||||
}
|
||||
|
||||
// mesa hw accel
|
||||
if s.mesaDriver != "" {
|
||||
mesaFiles := []string{
|
||||
"/usr/lib/libEGL.so.1",
|
||||
"/usr/lib/libGLESv2.so.2",
|
||||
"/usr/lib/libgbm.so.1",
|
||||
"/usr/lib/libudev.so.1",
|
||||
"/usr/lib/xorg/modules/dri/" + s.mesaDriver + "_dri.so",
|
||||
}
|
||||
if mesaFileList, err := misc.GetFiles(mesaFiles, true); err != nil {
|
||||
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
|
||||
} else {
|
||||
for _, file := range mesaFileList {
|
||||
files.Add(file, file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func getOskConfFontPath(oskConfPath string) (string, error) {
|
||||
var path string
|
||||
f, err := os.Open(oskConfPath)
|
||||
if err != nil {
|
||||
return path, err
|
||||
}
|
||||
defer f.Close()
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
// "key = val" is 3 fields
|
||||
if len(fields) > 2 && fields[0] == "keyboard-font" {
|
||||
path = fields[2]
|
||||
}
|
||||
}
|
||||
if exists, err := misc.Exists(path); !exists {
|
||||
return path, fmt.Errorf("unable to find font: %s", path)
|
||||
} else if err != nil {
|
||||
return path, fmt.Errorf("received unexpected error when getting status for %q: %w", path, err)
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
165
internal/misc/getfiles.go
Normal file
165
internal/misc/getfiles.go
Normal file
@@ -0,0 +1,165 @@
|
||||
package misc
|
||||
|
||||
import (
|
||||
"debug/elf"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
|
||||
)
|
||||
|
||||
func GetFiles(list []string, required bool) (files []string, err error) {
|
||||
for _, file := range list {
|
||||
filelist, err := getFile(file, required)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, filelist...)
|
||||
}
|
||||
|
||||
files = RemoveDuplicates(files)
|
||||
return
|
||||
}
|
||||
|
||||
func getFile(file string, required bool) (files []string, err error) {
|
||||
// Expand glob expression
|
||||
expanded, err := filepath.Glob(file)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if len(expanded) > 0 && expanded[0] != file {
|
||||
for _, path := range expanded {
|
||||
if globFiles, err := getFile(path, required); err != nil {
|
||||
return files, err
|
||||
} else {
|
||||
files = append(files, globFiles...)
|
||||
}
|
||||
}
|
||||
return RemoveDuplicates(files), nil
|
||||
}
|
||||
|
||||
fileInfo, err := os.Stat(file)
|
||||
if err != nil {
|
||||
if required {
|
||||
return files, fmt.Errorf("getFile: failed to stat file %q: %w", file, err)
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
if fileInfo.IsDir() {
|
||||
// Recurse over directory contents
|
||||
err := filepath.Walk(file, func(path string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if f.IsDir() {
|
||||
return nil
|
||||
}
|
||||
newFiles, err := getFile(path, required)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files = append(files, newFiles...)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return files, err
|
||||
}
|
||||
} else {
|
||||
files = append(files, file)
|
||||
|
||||
// get dependencies for binaries
|
||||
if _, err := elf.Open(file); err == nil {
|
||||
if binaryDepFiles, err := getBinaryDeps(file); err != nil {
|
||||
return files, err
|
||||
} else {
|
||||
files = append(files, binaryDepFiles...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
files = RemoveDuplicates(files)
|
||||
return
|
||||
}
|
||||
|
||||
func getDeps(file string, parents map[string]struct{}) (files []string, err error) {
|
||||
|
||||
if _, found := parents[file]; found {
|
||||
return
|
||||
}
|
||||
|
||||
// get dependencies for binaries
|
||||
fd, err := elf.Open(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getDeps: unable to open elf binary %q: %w", file, err)
|
||||
}
|
||||
libs, _ := fd.ImportedLibraries()
|
||||
fd.Close()
|
||||
files = append(files, file)
|
||||
parents[file] = struct{}{}
|
||||
|
||||
if len(libs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// we don't recursively search these paths for performance reasons
|
||||
libdirGlobs := []string{
|
||||
"/usr/lib",
|
||||
"/lib",
|
||||
"/usr/lib/expect*",
|
||||
}
|
||||
|
||||
for _, lib := range libs {
|
||||
found := false
|
||||
findDepLoop:
|
||||
for _, libdirGlob := range libdirGlobs {
|
||||
libdirs, _ := filepath.Glob(libdirGlob)
|
||||
for _, libdir := range libdirs {
|
||||
path := filepath.Join(libdir, lib)
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
binaryDepFiles, err := getDeps(path, parents)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, binaryDepFiles...)
|
||||
files = append(files, path)
|
||||
found = true
|
||||
break findDepLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, fmt.Errorf("getDeps: unable to locate dependency for %q: %s", file, lib)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively list all dependencies for a given ELF binary
|
||||
func getBinaryDeps(file string) ([]string, error) {
|
||||
// if file is a symlink, resolve dependencies for target
|
||||
fileStat, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getBinaryDeps: failed to stat file %q: %w", file, err)
|
||||
}
|
||||
|
||||
// Symlink: write symlink to archive then set 'file' to link target
|
||||
if fileStat.Mode()&os.ModeSymlink != 0 {
|
||||
target, err := os.Readlink(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getBinaryDeps: unable to read symlink %q: %w", file, err)
|
||||
}
|
||||
if !filepath.IsAbs(target) {
|
||||
target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
file = target
|
||||
}
|
||||
|
||||
return getDeps(file, make(map[string]struct{}))
|
||||
|
||||
}
|
65
internal/misc/misc.go
Normal file
65
internal/misc/misc.go
Normal file
@@ -0,0 +1,65 @@
|
||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package misc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Merge the contents of "b" into "a", overwriting any previously existing keys
|
||||
// in "a"
|
||||
func Merge(a map[string]string, b map[string]string) {
|
||||
for k, v := range b {
|
||||
a[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Removes duplicate entries from the given string slice and returns a slice
|
||||
// with the unique values
|
||||
func RemoveDuplicates(in []string) (out []string) {
|
||||
// use a map to "remove" duplicates. the value in the map is totally
|
||||
// irrelevant
|
||||
outMap := make(map[string]bool)
|
||||
for _, s := range in {
|
||||
if ok := outMap[s]; !ok {
|
||||
outMap[s] = true
|
||||
}
|
||||
}
|
||||
|
||||
out = make([]string, 0, len(outMap))
|
||||
for k := range outMap {
|
||||
out = append(out, k)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Prints the execution time of a function, not meant to be very
|
||||
// sensitive/accurate, but good enough to gauge rough run times.
|
||||
// Meant to be called as:
|
||||
//
|
||||
// defer misc.TimeFunc(time.Now(), "foo")
|
||||
func TimeFunc(start time.Time, name string) {
|
||||
elapsed := time.Since(start)
|
||||
log.Printf("%s completed in: %.2fs", name, elapsed.Seconds())
|
||||
}
|
||||
|
||||
// Exists tests if the given file/dir exists or not. Returns any errors related
|
||||
// to os.Stat if the type is *not* ErrNotExist. If an error is returned, then
|
||||
// the value of the returned boolean cannot be trusted.
|
||||
func Exists(file string) (bool, error) {
|
||||
_, err := os.Stat(file)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
} else if errors.Is(err, os.ErrNotExist) {
|
||||
// Don't return the error, the file doesn't exist which is OK
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Other errors from os.Stat returned here
|
||||
return false, err
|
||||
}
|
125
internal/misc/misc_test.go
Normal file
125
internal/misc/misc_test.go
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package misc
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
subtests := []struct {
|
||||
name string
|
||||
inA map[string]string
|
||||
inB map[string]string
|
||||
expected map[string]string
|
||||
}{
|
||||
{
|
||||
name: "empty B",
|
||||
inA: map[string]string{
|
||||
"foo": "bar",
|
||||
"banana": "airplane",
|
||||
},
|
||||
inB: map[string]string{},
|
||||
expected: map[string]string{
|
||||
"foo": "bar",
|
||||
"banana": "airplane",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty A",
|
||||
inA: map[string]string{},
|
||||
inB: map[string]string{
|
||||
"foo": "bar",
|
||||
"banana": "airplane",
|
||||
},
|
||||
expected: map[string]string{
|
||||
"foo": "bar",
|
||||
"banana": "airplane",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "both populated, some duplicates",
|
||||
inA: map[string]string{
|
||||
"bar": "bazz",
|
||||
"banana": "yellow",
|
||||
"guava": "green",
|
||||
},
|
||||
inB: map[string]string{
|
||||
"foo": "bar",
|
||||
"banana": "airplane",
|
||||
},
|
||||
expected: map[string]string{
|
||||
"foo": "bar",
|
||||
"guava": "green",
|
||||
"banana": "airplane",
|
||||
"bar": "bazz",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, st := range subtests {
|
||||
t.Run(st.name, func(t *testing.T) {
|
||||
out := st.inA
|
||||
Merge(out, st.inB)
|
||||
if !reflect.DeepEqual(st.expected, out) {
|
||||
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveDuplicates(t *testing.T) {
|
||||
subtests := []struct {
|
||||
name string
|
||||
in []string
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
name: "no duplicates",
|
||||
in: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"banana",
|
||||
"airplane",
|
||||
},
|
||||
expected: []string{
|
||||
"foo",
|
||||
"bar",
|
||||
"banana",
|
||||
"airplane",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all duplicates",
|
||||
in: []string{
|
||||
"foo",
|
||||
"foo",
|
||||
"foo",
|
||||
"foo",
|
||||
},
|
||||
expected: []string{
|
||||
"foo",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
in: []string{},
|
||||
expected: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, st := range subtests {
|
||||
t.Run(st.name, func(t *testing.T) {
|
||||
// note: sorting to make comparison easier later
|
||||
sort.Strings(st.expected)
|
||||
out := RemoveDuplicates(st.in)
|
||||
sort.Strings(out)
|
||||
if !reflect.DeepEqual(st.expected, out) {
|
||||
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,16 +1,14 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package misc
|
||||
package osutil
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
"strings"
|
||||
|
||||
type StringSet map[string]bool
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
|
||||
// absolute path
|
||||
@@ -48,3 +46,29 @@ func FreeSpace(path string) (uint64, error) {
|
||||
size := stat.Bavail * uint64(stat.Bsize)
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func getKernelReleaseFile() (string, error) {
|
||||
files, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
|
||||
// only one kernel flavor supported
|
||||
if len(files) != 1 {
|
||||
return "", fmt.Errorf("only one kernel release/flavor is supported, found: %q", files)
|
||||
}
|
||||
|
||||
return files[0], nil
|
||||
}
|
||||
|
||||
func GetKernelVersion() (string, error) {
|
||||
var version string
|
||||
|
||||
releaseFile, err := getKernelReleaseFile()
|
||||
if err != nil {
|
||||
return version, err
|
||||
}
|
||||
|
||||
contents, err := os.ReadFile(releaseFile)
|
||||
if err != nil {
|
||||
return version, err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(contents)), nil
|
||||
}
|
694
main.go
694
main.go
@@ -1,694 +0,0 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"debug/elf"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/archive"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
|
||||
)
|
||||
|
||||
func timeFunc(start time.Time, name string) {
|
||||
elapsed := time.Since(start)
|
||||
log.Printf("%s completed in: %s", name, elapsed)
|
||||
}
|
||||
|
||||
func main() {
|
||||
deviceinfoFile := "/etc/deviceinfo"
|
||||
if !exists(deviceinfoFile) {
|
||||
log.Print("NOTE: deviceinfo (from device package) not installed yet, " +
|
||||
"not building the initramfs now (it should get built later " +
|
||||
"automatically.)")
|
||||
return
|
||||
}
|
||||
|
||||
devinfo, err := deviceinfo.ReadDeviceinfo(deviceinfoFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
outDir := flag.String("d", "/boot", "Directory to output initfs(-extra) and other boot files")
|
||||
flag.Parse()
|
||||
|
||||
defer timeFunc(time.Now(), "mkinitfs")
|
||||
|
||||
kernVer, err := getKernelVersion()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// temporary working dir
|
||||
workDir, err := ioutil.TempDir("", "mkinitfs")
|
||||
if err != nil {
|
||||
log.Fatal("Unable to create temporary work directory:", err)
|
||||
}
|
||||
defer os.RemoveAll(workDir)
|
||||
|
||||
log.Print("Generating for kernel version: ", kernVer)
|
||||
log.Print("Output directory: ", *outDir)
|
||||
|
||||
if err := generateInitfs("initramfs", workDir, kernVer, devinfo); err != nil {
|
||||
log.Fatal("generateInitfs: ", err)
|
||||
}
|
||||
|
||||
if err := generateInitfsExtra("initramfs-extra", workDir, devinfo); err != nil {
|
||||
log.Fatal("generateInitfsExtra: ", err)
|
||||
}
|
||||
|
||||
// Final processing of initramfs / kernel is done by boot-deploy
|
||||
if err := bootDeploy(workDir, *outDir); err != nil {
|
||||
log.Fatal("bootDeploy: ", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func bootDeploy(workDir string, outDir string) error {
|
||||
// boot-deploy expects the kernel to be in the same dir as initramfs.
|
||||
// Assume that the kernel is in the output dir...
|
||||
log.Print("== Using boot-deploy to finalize/install files ==")
|
||||
kernels, _ := filepath.Glob(filepath.Join(outDir, "vmlinuz*"))
|
||||
if len(kernels) == 0 {
|
||||
return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
|
||||
}
|
||||
|
||||
// Pick a kernel that does not have suffixes added by boot-deploy
|
||||
var kernFile string
|
||||
for _, f := range kernels {
|
||||
if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
|
||||
continue
|
||||
}
|
||||
kernFile = f
|
||||
break
|
||||
}
|
||||
|
||||
kernFd, err := os.Open(kernFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer kernFd.Close()
|
||||
|
||||
kernFileCopy, err := os.Create(filepath.Join(workDir, "vmlinuz"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
|
||||
return err
|
||||
}
|
||||
kernFileCopy.Close()
|
||||
|
||||
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
|
||||
cmd := exec.Command("boot-deploy",
|
||||
"-i", "initramfs",
|
||||
"-k", "vmlinuz",
|
||||
"-d", workDir,
|
||||
"-o", outDir,
|
||||
"initramfs-extra")
|
||||
if !exists(cmd.Path) {
|
||||
return errors.New("boot-deploy command not found")
|
||||
}
|
||||
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
log.Print("'boot-deploy' command failed")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func exists(file string) bool {
|
||||
if _, err := os.Stat(file); err == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func getHookFiles(filesdir string) misc.StringSet {
|
||||
fileInfo, err := ioutil.ReadDir(filesdir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
files := make(misc.StringSet)
|
||||
for _, file := range fileInfo {
|
||||
path := filepath.Join(filesdir, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
if !exists(s.Text()) {
|
||||
log.Fatalf("Unable to find file %q required by %q", s.Text(), path)
|
||||
}
|
||||
files[s.Text()] = false
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
// Recursively list all dependencies for a given ELF binary
|
||||
func getBinaryDeps(files misc.StringSet, file string) error {
|
||||
// if file is a symlink, resolve dependencies for target
|
||||
fileStat, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
log.Print("getBinaryDeps: failed to stat file")
|
||||
return err
|
||||
}
|
||||
|
||||
// Symlink: write symlink to archive then set 'file' to link target
|
||||
if fileStat.Mode()&os.ModeSymlink != 0 {
|
||||
target, err := os.Readlink(file)
|
||||
if err != nil {
|
||||
log.Print("getBinaryDeps: unable to read symlink: ", file)
|
||||
return err
|
||||
}
|
||||
if !filepath.IsAbs(target) {
|
||||
target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := getBinaryDeps(files, target); err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// get dependencies for binaries
|
||||
fd, err := elf.Open(file)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
libs, _ := fd.ImportedLibraries()
|
||||
fd.Close()
|
||||
files[file] = false
|
||||
|
||||
if len(libs) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
libdirs := []string{"/usr/lib", "/lib"}
|
||||
for _, lib := range libs {
|
||||
found := false
|
||||
for _, libdir := range libdirs {
|
||||
path := filepath.Join(libdir, lib)
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
err := getBinaryDeps(files, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files[path] = false
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
log.Fatalf("Unable to locate dependency for %q: %s", file, lib)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFiles(files misc.StringSet, newFiles misc.StringSet, required bool) error {
|
||||
for file := range newFiles {
|
||||
err := getFile(files, file, required)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFile(files misc.StringSet, file string, required bool) error {
|
||||
if !exists(file) {
|
||||
if required {
|
||||
return errors.New("getFile: File does not exist :" + file)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
files[file] = false
|
||||
|
||||
// get dependencies for binaries
|
||||
if _, err := elf.Open(file); err != nil {
|
||||
// file is not an elf, so don't resolve lib dependencies
|
||||
return nil
|
||||
}
|
||||
|
||||
err := getBinaryDeps(files, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getOskConfFontPath(oskConfPath string) (string, error) {
|
||||
var path string
|
||||
f, err := os.Open(oskConfPath)
|
||||
if err != nil {
|
||||
return path, err
|
||||
}
|
||||
defer f.Close()
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
// "key = val" is 3 fields
|
||||
if len(fields) > 2 && fields[0] == "keyboard-font" {
|
||||
path = fields[2]
|
||||
}
|
||||
}
|
||||
if !exists(path) {
|
||||
return path, errors.New("Unable to find font: " + path)
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
// Get a list of files and their dependencies related to supporting rootfs full
|
||||
// disk (d)encryption
|
||||
func getFdeFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
|
||||
confFiles := misc.StringSet{
|
||||
"/etc/osk.conf": false,
|
||||
"/etc/ts.conf": false,
|
||||
"/etc/pointercal": false,
|
||||
"/etc/fb.modes": false,
|
||||
"/etc/directfbrc": false,
|
||||
}
|
||||
// TODO: this shouldn't be false? though some files (pointercal) don't always exist...
|
||||
if err := getFiles(files, confFiles, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// osk-sdl
|
||||
oskFiles := misc.StringSet{
|
||||
"/usr/bin/osk-sdl": false,
|
||||
"/sbin/cryptsetup": false,
|
||||
"/usr/lib/libGL.so.1": false}
|
||||
if err := getFiles(files, oskFiles, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fontFile, err := getOskConfFontPath("/etc/osk.conf")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files[fontFile] = false
|
||||
|
||||
// Directfb
|
||||
dfbFiles := make(misc.StringSet)
|
||||
err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
|
||||
if filepath.Ext(path) == ".so" {
|
||||
dfbFiles[path] = false
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Print("getBinaryDeps: failed to stat file")
|
||||
return err
|
||||
}
|
||||
if err := getFiles(files, dfbFiles, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// tslib
|
||||
tslibFiles := make(misc.StringSet)
|
||||
err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
|
||||
if filepath.Ext(path) == ".so" {
|
||||
tslibFiles[path] = false
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Print("getBinaryDeps: failed to stat file")
|
||||
return err
|
||||
}
|
||||
libts, _ := filepath.Glob("/usr/lib/libts*")
|
||||
for _, file := range libts {
|
||||
tslibFiles[file] = false
|
||||
}
|
||||
if err = getFiles(files, tslibFiles, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// mesa hw accel
|
||||
if devinfo.MesaDriver != "" {
|
||||
mesaFiles := misc.StringSet{
|
||||
"/usr/lib/libEGL.so.1": false,
|
||||
"/usr/lib/libGLESv2.so.2": false,
|
||||
"/usr/lib/libgbm.so.1": false,
|
||||
"/usr/lib/libudev.so.1": false,
|
||||
"/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so": false,
|
||||
}
|
||||
if err := getFiles(files, mesaFiles, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getHookScripts(files misc.StringSet) {
|
||||
scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh")
|
||||
for _, script := range scripts {
|
||||
files[script] = false
|
||||
}
|
||||
}
|
||||
|
||||
func getInitfsExtraFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
|
||||
log.Println("== Generating initramfs extra ==")
|
||||
binariesExtra := misc.StringSet{
|
||||
"/lib/libz.so.1": false,
|
||||
"/sbin/dmsetup": false,
|
||||
"/sbin/e2fsck": false,
|
||||
"/usr/sbin/parted": false,
|
||||
"/usr/sbin/resize2fs": false,
|
||||
"/usr/sbin/resize.f2fs": false,
|
||||
}
|
||||
log.Println("- Including extra binaries")
|
||||
if err := getFiles(files, binariesExtra, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists("/usr/bin/osk-sdl") {
|
||||
log.Println("- Including FDE support")
|
||||
if err := getFdeFiles(files, devinfo); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Println("- *NOT* including FDE support")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getInitfsFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
|
||||
log.Println("== Generating initramfs ==")
|
||||
requiredFiles := misc.StringSet{
|
||||
"/bin/busybox": false,
|
||||
"/bin/sh": false,
|
||||
"/bin/busybox-extras": false,
|
||||
"/usr/sbin/telnetd": false,
|
||||
"/sbin/kpartx": false,
|
||||
"/etc/deviceinfo": false,
|
||||
}
|
||||
|
||||
// Hook files & scripts
|
||||
if exists("/etc/postmarketos-mkinitfs/files") {
|
||||
log.Println("- Including hook files")
|
||||
hookFiles := getHookFiles("/etc/postmarketos-mkinitfs/files")
|
||||
if err := getFiles(files, hookFiles, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
log.Println("- Including hook scripts")
|
||||
getHookScripts(files)
|
||||
|
||||
log.Println("- Including required binaries")
|
||||
if err := getFiles(files, requiredFiles, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getInitfsModules(files misc.StringSet, devinfo deviceinfo.DeviceInfo, kernelVer string) error {
|
||||
log.Println("- Including kernel modules")
|
||||
|
||||
modDir := filepath.Join("/lib/modules", kernelVer)
|
||||
if !exists(modDir) {
|
||||
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
|
||||
log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
|
||||
return nil
|
||||
}
|
||||
|
||||
// modules.* required by modprobe
|
||||
modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
|
||||
for _, file := range modprobeFiles {
|
||||
files[file] = false
|
||||
}
|
||||
|
||||
// module name (without extension), or directory (trailing slash is important! globs OK)
|
||||
requiredModules := []string{
|
||||
"loop",
|
||||
"dm-crypt",
|
||||
"kernel/fs/overlayfs/",
|
||||
"kernel/crypto/",
|
||||
"kernel/arch/*/crypto/",
|
||||
}
|
||||
|
||||
for _, item := range requiredModules {
|
||||
dir, file := filepath.Split(item)
|
||||
if file == "" {
|
||||
// item is a directory
|
||||
dir = filepath.Join(modDir, dir)
|
||||
dirs, _ := filepath.Glob(dir)
|
||||
for _, d := range dirs {
|
||||
if err := getModulesInDir(files, d); err != nil {
|
||||
log.Print("Unable to get modules in dir: ", d)
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if dir == "" {
|
||||
// item is a module name
|
||||
if err := getModule(files, file, modDir); err != nil {
|
||||
log.Print("Unable to get module: ", file)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Printf("Unknown module entry: %q", item)
|
||||
}
|
||||
}
|
||||
|
||||
// deviceinfo modules
|
||||
for _, module := range strings.Fields(devinfo.ModulesInitfs) {
|
||||
if err := getModule(files, module, modDir); err != nil {
|
||||
log.Print("Unable to get modules from deviceinfo")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// /etc/postmarketos-mkinitfs/modules/*.modules
|
||||
initfsModFiles, _ := filepath.Glob("/etc/postmarketos-mkinitfs/modules/*.modules")
|
||||
for _, modFile := range initfsModFiles {
|
||||
f, err := os.Open(modFile)
|
||||
if err != nil {
|
||||
log.Print("getInitfsModules: unable to open mkinitfs modules file: ", modFile)
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
if err := getModule(files, s.Text(), modDir); err != nil {
|
||||
log.Print("getInitfsModules: unable to get module file: ", s.Text())
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getKernelReleaseFile() (string, error) {
|
||||
files, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
|
||||
// only one kernel flavor supported
|
||||
if len(files) != 1 {
|
||||
return "", fmt.Errorf("only one kernel release/flavor is supported, found: %q", files)
|
||||
}
|
||||
|
||||
return files[0], nil
|
||||
}
|
||||
|
||||
func getKernelVersion() (string, error) {
|
||||
var version string
|
||||
|
||||
releaseFile, err := getKernelReleaseFile()
|
||||
if err != nil {
|
||||
return version, err
|
||||
}
|
||||
|
||||
contents, err := os.ReadFile(releaseFile)
|
||||
if err != nil {
|
||||
return version, err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(contents)), nil
|
||||
}
|
||||
|
||||
func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
|
||||
initfsArchive, err := archive.New()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
requiredDirs := []string{
|
||||
"/bin", "/sbin", "/usr/bin", "/usr/sbin", "/proc", "/sys",
|
||||
"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
|
||||
}
|
||||
for _, dir := range requiredDirs {
|
||||
initfsArchive.Dirs[dir] = false
|
||||
}
|
||||
|
||||
if err := getInitfsFiles(initfsArchive.Files, devinfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := getInitfsModules(initfsArchive.Files, devinfo, kernVer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// splash images
|
||||
log.Println("- Including splash images")
|
||||
splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz")
|
||||
for _, file := range splashFiles {
|
||||
// splash images are expected at /<file>
|
||||
if err := initfsArchive.AddFile(file, filepath.Join("/", filepath.Base(file))); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// initfs_functions
|
||||
if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Println("- Writing and verifying initramfs archive")
|
||||
if err := initfsArchive.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo) error {
|
||||
initfsExtraArchive, err := archive.New()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := getInitfsExtraFiles(initfsExtraArchive.Files, devinfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Println("- Writing and verifying initramfs-extra archive")
|
||||
if err := initfsExtraArchive.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// stripExts returns everything in file before the first '.', i.e. the
// name with all extensions removed ("foo.ko.xz" -> "foo").
func stripExts(file string) string {
	if i := strings.IndexByte(file, '.'); i >= 0 {
		return file[:i]
	}
	return file
}
|
||||
|
||||
func getModulesInDir(files misc.StringSet, modPath string) error {
|
||||
err := filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
|
||||
// TODO: need to support more extensions?
|
||||
if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
|
||||
return nil
|
||||
}
|
||||
files[path] = false
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
|
||||
// file and all of its dependencies.
|
||||
// Note: it's not necessarily fatal if the module is not found, since it may
|
||||
// have been built into the kernel
|
||||
// TODO: look for it in modules.builtin, and make it fatal if it can't be found
|
||||
// anywhere
|
||||
func getModule(files misc.StringSet, modName string, modDir string) error {
|
||||
|
||||
modDep := filepath.Join(modDir, "modules.dep")
|
||||
if !exists(modDep) {
|
||||
log.Fatal("Kernel module.dep not found: ", modDir)
|
||||
}
|
||||
|
||||
fd, err := os.Open(modDep)
|
||||
if err != nil {
|
||||
log.Print("Unable to open modules.dep: ", modDep)
|
||||
return err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
deps, err := getModuleDeps(modName, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, dep := range deps {
|
||||
p := filepath.Join(modDir, dep)
|
||||
if !exists(p) {
|
||||
log.Print(fmt.Sprintf("Tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p))
|
||||
return err
|
||||
}
|
||||
files[p] = false
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the canonicalized name for the module as represented in the given modules.dep io.reader
|
||||
func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
|
||||
var deps []string
|
||||
|
||||
// split the module name on - and/or _, build a regex for matching
|
||||
splitRe := regexp.MustCompile("[-_]+")
|
||||
modNameReStr := splitRe.ReplaceAllString(modName, "[-_]+")
|
||||
re := regexp.MustCompile("^" + modNameReStr + "$")
|
||||
|
||||
s := bufio.NewScanner(modulesDep)
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
if len(fields) == 0 {
|
||||
continue
|
||||
}
|
||||
fields[0] = strings.TrimSuffix(fields[0], ":")
|
||||
|
||||
found := re.FindAll([]byte(filepath.Base(stripExts(fields[0]))), -1)
|
||||
if len(found) > 0 {
|
||||
deps = append(deps, fields...)
|
||||
break
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
log.Print("Unable to get module + dependencies: ", modName)
|
||||
return deps, err
|
||||
}
|
||||
|
||||
return deps, nil
|
||||
}
|
@@ -1,225 +0,0 @@
|
||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"github.com/cavaliercoder/go-cpio"
|
||||
"github.com/klauspost/pgzip"
|
||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Archive accumulates directories and files destined for a cpio
// initramfs image, buffering the cpio stream in memory until Write
// compresses it out to disk.
type Archive struct {
	Dirs misc.StringSet // dir path -> true once its header has been written to the cpio stream
	Files misc.StringSet // file path -> true once its contents have been written to the cpio stream
	cpioWriter *cpio.Writer // writes cpio records into buf
	buf *bytes.Buffer // in-memory cpio image, compressed to disk by writeCompressed
}
|
||||
|
||||
func New() (*Archive, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
archive := &Archive{
|
||||
cpioWriter: cpio.NewWriter(buf),
|
||||
Files: make(misc.StringSet),
|
||||
Dirs: make(misc.StringSet),
|
||||
buf: buf,
|
||||
}
|
||||
|
||||
return archive, nil
|
||||
}
|
||||
|
||||
func (archive *Archive) Write(path string, mode os.FileMode) error {
|
||||
if err := archive.writeCpio(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := archive.cpioWriter.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write archive to path
|
||||
if err := archive.writeCompressed(path, mode); err != nil {
|
||||
log.Print("Unable to write archive to location: ", path)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddFile writes the file at `file` into the cpio archive at path
// `dest`, creating any missing parent directory entries first.
// Symlinks are written as symlink entries and their resolved targets
// are then added recursively. Files already recorded in archive.Files
// as written are skipped.
func (archive *Archive) AddFile(file string, dest string) error {
	// Ensure all parent dirs of dest exist in the archive.
	if err := archive.addDir(filepath.Dir(dest)); err != nil {
		return err
	}

	if archive.Files[file] {
		// Already written to cpio
		return nil
	}

	// Lstat (not Stat) so symlinks are detected rather than followed.
	fileStat, err := os.Lstat(file)
	if err != nil {
		log.Print("AddFile: failed to stat file: ", file)
		return err
	}

	// Symlink: write symlink to archive then set 'file' to link target
	if fileStat.Mode()&os.ModeSymlink != 0 {
		// log.Printf("File %q is a symlink", file)
		target, err := os.Readlink(file)
		if err != nil {
			log.Print("AddFile: failed to get symlink target: ", file)
			return err
		}

		// cpio entry names carry no leading slash.
		destFilename := strings.TrimPrefix(dest, "/")
		hdr := &cpio.Header{
			Name: destFilename,
			Linkname: target,
			Mode: 0644 | cpio.ModeSymlink, // NOTE(review): link mode is hardcoded; original permissions are not preserved
			Size: int64(len(target)),
			// Checksum: 1,
		}
		if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
			return err
		}
		// A symlink's cpio payload is the target path itself.
		if _, err = archive.cpioWriter.Write([]byte(target)); err != nil {
			return err
		}

		// Mark as written before recursing; with the Files check at the
		// top this prevents infinite recursion on symlink cycles.
		archive.Files[file] = true
		if filepath.Dir(target) == "." {
			target = filepath.Join(filepath.Dir(file), target)
		}
		// make sure target is an absolute path
		if !filepath.IsAbs(target) {
			target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
			if err != nil {
				return err
			}
		}
		// TODO: add verbose mode, print stuff like this:
		// log.Printf("symlink: %q, target: %q", file, target)
		// write symlink target
		err = archive.AddFile(target, target)
		return err
	}

	// log.Printf("writing file: %q", file)

	fd, err := os.Open(file)
	if err != nil {
		return err
	}
	defer fd.Close()

	destFilename := strings.TrimPrefix(dest, "/")
	hdr := &cpio.Header{
		Name: destFilename,
		Mode: cpio.FileMode(fileStat.Mode().Perm()),
		Size: fileStat.Size(),
		// Checksum: 1,
	}
	if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
		return err
	}

	// Stream the file contents straight into the cpio stream.
	if _, err = io.Copy(archive.cpioWriter, fd); err != nil {
		return err
	}

	archive.Files[file] = true

	return nil
}
|
||||
|
||||
func (archive *Archive) writeCompressed(path string, mode os.FileMode) error {
|
||||
// TODO: support other compression formats, based on deviceinfo
|
||||
fd, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gz, err := pgzip.NewWriterLevel(fd, flate.BestSpeed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = io.Copy(gz, archive.buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := gz.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// call fsync just to be sure
|
||||
if err := fd.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) writeCpio() error {
|
||||
// Write any dirs added explicitly
|
||||
for dir := range archive.Dirs {
|
||||
archive.addDir(dir)
|
||||
}
|
||||
|
||||
// Write files and any missing parent dirs
|
||||
for file, imported := range archive.Files {
|
||||
if imported {
|
||||
continue
|
||||
}
|
||||
if err := archive.AddFile(file, file); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (archive *Archive) addDir(dir string) error {
|
||||
if archive.Dirs[dir] {
|
||||
// Already imported
|
||||
return nil
|
||||
}
|
||||
if dir == "/" {
|
||||
dir = "."
|
||||
}
|
||||
|
||||
subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
|
||||
for i, subdir := range subdirs {
|
||||
path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
|
||||
if archive.Dirs[path] {
|
||||
// Subdir already imported
|
||||
continue
|
||||
}
|
||||
err := archive.cpioWriter.WriteHeader(&cpio.Header{
|
||||
Name: path,
|
||||
Mode: cpio.ModeDir | 0755,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
archive.Dirs[path] = true
|
||||
// log.Print("wrote dir: ", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@@ -14,30 +14,11 @@ import (
|
||||
)
|
||||
|
||||
type DeviceInfo struct {
|
||||
AppendDtb string
|
||||
Arch string
|
||||
BootimgAppendSEAndroidEnforce string
|
||||
BootimgBlobpack string
|
||||
BootimgDtbSecond string
|
||||
BootimgMtkMkimage string
|
||||
BootimgPxa string
|
||||
BootimgQcdt string
|
||||
Dtb string
|
||||
FlashKernelOnUpdate string
|
||||
FlashOffsetBase string
|
||||
FlashOffsetKernel string
|
||||
FlashOffsetRamdisk string
|
||||
FlashOffsetSecond string
|
||||
FlashOffsetTags string
|
||||
FlashPagesize string
|
||||
GenerateBootimg string
|
||||
GenerateLegacyUbootInitfs string
|
||||
InitfsCompression string
|
||||
KernelCmdline string
|
||||
LegacyUbootLoadAddress string
|
||||
MesaDriver string
|
||||
MkinitfsPostprocess string
|
||||
ModulesInitfs string
|
||||
InitfsCompression string
|
||||
InitfsExtraCompression string
|
||||
MesaDriver string
|
||||
ModulesInitfs string
|
||||
UbootBoardname string
|
||||
}
|
||||
|
||||
func ReadDeviceinfo(file string) (DeviceInfo, error) {
|
||||
@@ -119,7 +100,10 @@ func nameToField(name string) string {
|
||||
if p == "deviceinfo" {
|
||||
continue
|
||||
}
|
||||
field = field + strings.Title(p)
|
||||
if len(p) < 1 {
|
||||
continue
|
||||
}
|
||||
field = field + strings.ToUpper(p[:1]) + p[1:]
|
||||
}
|
||||
|
||||
return field
|
||||
|
@@ -21,6 +21,7 @@ func TestNameToField(t *testing.T) {
|
||||
{"deviceinfo_modules_initfs", "ModulesInitfs"},
|
||||
{"modules_initfs", "ModulesInitfs"},
|
||||
{"deviceinfo_modules_initfs___", "ModulesInitfs"},
|
||||
{"deviceinfo_initfs_extra_compression", "InitfsExtraCompression"},
|
||||
}
|
||||
|
||||
for _, table := range tables {
|
||||
@@ -43,15 +44,9 @@ func TestUnmarshal(t *testing.T) {
|
||||
{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"\n", "panfrost foo bar bazz"},
|
||||
{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"", "panfrost foo bar bazz"},
|
||||
// line with multiple '='
|
||||
{"KernelCmdline",
|
||||
"deviceinfo_kernel_cmdline=\"PMOS_NO_OUTPUT_REDIRECT fw_devlink=off nvme_core.default_ps_max_latency_us=5500 pcie_aspm.policy=performance\"\n",
|
||||
"PMOS_NO_OUTPUT_REDIRECT fw_devlink=off nvme_core.default_ps_max_latency_us=5500 pcie_aspm.policy=performance"},
|
||||
{"InitfsCompression", "deviceinfo_initfs_compression=zstd:--foo=1 -T0 --bar=bazz", "zstd:--foo=1 -T0 --bar=bazz"},
|
||||
// empty option
|
||||
{"ModulesInitfs", "deviceinfo_modules_initfs=\"\"\n", ""},
|
||||
{"Dtb", "deviceinfo_dtb=\"freescale/imx8mq-librem5-r2 freescale/imx8mq-librem5-r3 freescale/imx8mq-librem5-r4\"\n",
|
||||
"freescale/imx8mq-librem5-r2 freescale/imx8mq-librem5-r3 freescale/imx8mq-librem5-r4"},
|
||||
// valid deviceinfo line, just not used in this module
|
||||
{"", "deviceinfo_codename=\"pine64-pinebookpro\"", ""},
|
||||
// line with comment at the end
|
||||
{"MesaDriver", "deviceinfo_mesa_driver=\"panfrost\" # this is a nice driver", "panfrost"},
|
||||
{"", "# this is a comment!\n", ""},
|
||||
|
Reference in New Issue
Block a user