3 Commits
2.2 ... 1.1.2

Author SHA1 Message Date
Clayton Craft
2104f9adcb bootDeploy: ignore suffixes added by boot-deploy when copying kernel (MR 11)
The glob might result in a vmlinuz-* filename that causes mkinitfs to
copy a modified kernel file to the boot-deploy working directory. This
excludes files that boot-deploy has touched from being copied/used by
boot-deploy

(cherry picked from commit 0925cbd8ac)
2021-10-03 18:14:15 -07:00
Minecrell
d62180f9d6 getModuleDeps: replace Split() loop with ReplaceAllString() (MR 12)
This should do the same as far as I can tell :)

(cherry picked from commit 866f17b86f)
2021-09-20 15:16:38 -07:00
Minecrell
b7f02ff970 getModulesDep: disallow regex submatches (MR 12)
At the moment modules in modules.dep are matched even on a submatch
e.g. looking up "msm" ends up matching "snd-soc-msm8916-digital.ko"
instead of "msm.ko". To fix this, disallow submatches using ^ and $.

(cherry picked from commit 15e99c3658)
2021-09-20 15:16:33 -07:00
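To illustrate the fix described in the commit above, here is a minimal, hypothetical standalone sketch (not part of the repository) showing how anchoring the generated pattern with ^ and $ rejects submatches:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Build the same style of pattern getModuleDeps uses: split the module
	// name on '-'/'_' so that e.g. "dwc_wdt" also matches "dwc-wdt".
	splitRe := regexp.MustCompile("[-_]+")
	pattern := splitRe.ReplaceAllString("msm", "[-_]+")

	loose := regexp.MustCompile(pattern)                // pre-fix: submatches allowed
	anchored := regexp.MustCompile("^" + pattern + "$") // post-fix: whole name only

	for _, file := range []string{"msm.ko", "snd-soc-msm8916-digital.ko"} {
		base := strings.Split(file, ".")[0] // strip extensions
		fmt.Printf("%s: loose=%v anchored=%v\n",
			file, loose.MatchString(base), anchored.MatchString(base))
	}
	// Output:
	// msm.ko: loose=true anchored=true
	// snd-soc-msm8916-digital.ko: loose=true anchored=false
}
```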
30 changed files with 1018 additions and 2502 deletions

.gitignore vendored

@@ -1,5 +1 @@
/*.1
/*.tar.gz
/*.sha512
/mkinitfs
/vendor
/postmarketos-mkinitfs

View File

@@ -3,15 +3,9 @@
# global settings
image: alpine:edge
variables:
GOFLAGS: "-buildvcs=false"
PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/mkinitfs-vendor-${CI_COMMIT_TAG}/${CI_COMMIT_TAG}"
stages:
- lint
- build
- vendor
- release
# defaults for "only"
# We need to run the CI jobs in a "merge request specific context", if CI is
@@ -27,37 +21,25 @@ stages:
- merge_requests
- tags
# device documentation
gofmt linting:
stage: lint
allow_failure: true
<<: *only-default
before_script:
# specific mirror used because staticcheck hasn't made it to the other mirrors yet...
- apk -q update --repository http://dl-4.alpinelinux.org/alpine/edge/testing
- apk -q add --repository http://dl-4.alpinelinux.org/alpine/edge/testing go staticcheck
script:
- .gitlab-ci/check_linting.sh
build:
stage: build
<<: *only-default
before_script:
- apk -q add go staticcheck make scdoc
- apk -q add go
script:
- make test
- make
- go build -v
- go test ./...
artifacts:
expire_in: 1 week
vendor:
stage: vendor
image: alpine:latest
only:
- tags
before_script:
- apk -q add curl go make
script:
- |
make VERSION="${CI_COMMIT_TAG}" vendor
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file "mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz" "${PACKAGE_REGISTRY_URL}/"
curl --header "JOB-TOKEN: ${CI_JOB_TOKEN}" --upload-file "mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512" "${PACKAGE_REGISTRY_URL}/"
release:
stage: release
image: registry.gitlab.com/gitlab-org/release-cli:latest
only:
- tags
script:
- |
release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
--assets-link "{\"name\":\"mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz\",\"url\":\"${PACKAGE_REGISTRY_URL}/mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz\"}" \
--assets-link "{\"name\":\"mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512\",\"url\":\"${PACKAGE_REGISTRY_URL}/mkinitfs-vendor-${CI_COMMIT_TAG}.tar.gz.sha512\"}"

.gitlab-ci/check_linting.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/sh
echo "### Running gofmt..."
files="$(gofmt -l .)"
if [ ! -z "$files" ]; then
# run gofmt to print out the diff of what needs to be changed
gofmt -d -e .
exit 1
fi
echo "### Running staticcheck..."
staticcheck ./...

View File

@@ -1,74 +0,0 @@
.POSIX:
.SUFFIXES: .1 .1.scd
VERSION?=$(shell git describe --tags --dirty 2>/dev/null || echo 0.0.0)
VPATH=doc
VENDORED="mkinitfs-vendor-$(VERSION)"
PREFIX?=/usr/local
BINDIR?=$(PREFIX)/sbin
MANDIR?=$(PREFIX)/share/man
SHAREDIR?=$(PREFIX)/share
GO?=go
GOFLAGS?=
LDFLAGS+=-s -w -X main.Version=$(VERSION)
RM?=rm -f
GOTEST=go test -count=1 -race
GOSRC!=find * -name '*.go'
GOSRC+=go.mod go.sum
DOCS := \
mkinitfs.1
all: mkinitfs $(DOCS)
mkinitfs: $(GOSRC)
$(GO) build $(GOFLAGS) -ldflags "$(LDFLAGS)" -o mkinitfs ./cmd/mkinitfs
.1.scd.1:
scdoc < $< > $@
doc: $(DOCS)
.PHONY: fmt
fmt:
gofmt -w .
test:
@if [ `gofmt -l . | wc -l` -ne 0 ]; then \
gofmt -d .; \
echo "ERROR: source files need reformatting with gofmt"; \
exit 1; \
fi
@staticcheck ./...
@$(GOTEST) ./...
clean:
$(RM) mkinitfs $(DOCS)
$(RM) $(VENDORED)*
install: $(DOCS) mkinitfs
install -Dm755 mkinitfs -t $(DESTDIR)$(BINDIR)/
install -Dm644 mkinitfs.1 -t $(DESTDIR)$(MANDIR)/man1/
.PHONY: checkinstall
checkinstall:
test -e $(DESTDIR)$(BINDIR)/mkinitfs
test -e $(DESTDIR)$(MANDIR)/man1/mkinitfs.1
RMDIR_IF_EMPTY:=sh -c '! [ -d $$0 ] || ls -1qA $$0 | grep -q . || rmdir $$0'
vendor:
go mod vendor
tar czf $(VENDORED).tar.gz vendor/
sha512sum $(VENDORED).tar.gz > $(VENDORED).tar.gz.sha512
$(RM) -rf vendor
uninstall:
$(RM) $(DESTDIR)$(BINDIR)/mkinitfs
${RMDIR_IF_EMPTY} $(DESTDIR)$(BINDIR)
$(RM) $(DESTDIR)$(MANDIR)/man1/mkinitfs.1
$(RMDIR_IF_EMPTY) $(DESTDIR)$(MANDIR)/man1
.PHONY: all clean install uninstall test vendor

View File

@@ -1,48 +0,0 @@
`mkinitfs` is a tool for generating an initramfs. It was originally designed
for postmarketOS, but a long term design goal is to be as distro-agnostic as
possible. It's capable of generating a split initramfs, in the style used by
postmarketOS, and supports running `boot-deploy` to install/finalize boot files
on a device.
## Building
Building this project requires a Go compiler/toolchain and `make`:
```
$ make
```
To install locally:
```
$ make install
```
The installation prefix can be set in the generally accepted way, by setting
`PREFIX`:
```
$ make PREFIX=/some/location
# make PREFIX=/some/location install
```
Other paths can be modified from the command line as well; see the top section of
the `Makefile` for more information.
Tests (functional and linting) can be executed by using the `test` make target:
```
$ make test
```
## Usage
The tool can be run with no options:
```
# mkinitfs
```
Configuration is done through a series of flat text files that list directories
and files, and by placing scripts in specific directories. See `man 1 mkinitfs`
for more information.
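As a quick, hypothetical example (the file name and paths below are illustrative, not defaults), a list such as `/etc/mkinitfs/files/crypttools.files` could contain:
```
/sbin/cryptsetup
/etc/crypttab:/crypttab
```
Each line names a source path to copy into the archive, with an optional destination path after the colon.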

View File

@@ -1,165 +0,0 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package main
import (
"flag"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"time"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/bootdeploy"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookdirs"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookfiles"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/hookscripts"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/initramfs"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/modules"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist/osksdl"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
)
// set at build time
var Version string
func main() {
retCode := 0
defer func() { os.Exit(retCode) }()
outDir := flag.String("d", "/boot", "Directory to output initfs(-extra) and other boot files")
var showVersion bool
flag.BoolVar(&showVersion, "version", false, "Print version and quit.")
var disableBootDeploy bool
flag.BoolVar(&disableBootDeploy, "no-bootdeploy", false, "Disable running 'boot-deploy' after generating archives.")
flag.Parse()
if showVersion {
fmt.Printf("%s - %s\n", filepath.Base(os.Args[0]), Version)
return
}
log.Default().SetFlags(log.Lmicroseconds)
var devinfo deviceinfo.DeviceInfo
deverr_usr := devinfo.ReadDeviceinfo("/usr/share/deviceinfo/deviceinfo")
deverr_etc := devinfo.ReadDeviceinfo("/etc/deviceinfo")
if deverr_etc != nil && deverr_usr != nil {
log.Println("Error reading deviceinfo")
log.Println("\t/usr/share/deviceinfo/deviceinfo:", deverr_usr)
log.Println("\t/etc/deviceinfo:", deverr_etc)
retCode = 1
return
}
defer misc.TimeFunc(time.Now(), "mkinitfs")
kernVer, err := osutil.GetKernelVersion()
if err != nil {
log.Println(err)
retCode = 1
return
}
// temporary working dir
workDir, err := os.MkdirTemp("", "mkinitfs")
if err != nil {
log.Println(err)
log.Println("unable to create temporary work directory")
retCode = 1
return
}
defer func() {
e := os.RemoveAll(workDir)
if e != nil && err == nil {
err = e
retCode = 1
}
}()
log.Print("Generating for kernel version: ", kernVer)
log.Print("Output directory: ", *outDir)
//
// initramfs
//
// deviceinfo.InitfsCompression needs a little more post-processing
compressionFormat, compressionLevel := archive.ExtractFormatLevel(devinfo.InitfsCompression)
log.Printf("== Generating %s ==\n", "initramfs")
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
start := time.Now()
initramfsAr := archive.New(compressionFormat, compressionLevel)
initfs := initramfs.New([]filelist.FileLister{
hookdirs.New("/usr/share/mkinitfs/dirs"),
hookdirs.New("/etc/mkinitfs/dirs"),
hookfiles.New("/usr/share/mkinitfs/files"),
hookfiles.New("/etc/mkinitfs/files"),
hookscripts.New("/usr/share/mkinitfs/hooks", "/hooks"),
hookscripts.New("/etc/mkinitfs/hooks", "/hooks"),
modules.New(strings.Fields(devinfo.ModulesInitfs), "/usr/share/mkinitfs/modules"),
modules.New([]string{}, "/etc/mkinitfs/modules"),
})
initramfsAr.AddItems(initfs)
if err := initramfsAr.Write(filepath.Join(workDir, "initramfs"), os.FileMode(0644)); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs")
retCode = 1
return
}
misc.TimeFunc(start, "initramfs")
//
// initramfs-extra
//
// deviceinfo.InitfsExtraCompression needs a little more post-processing
compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
log.Printf("== Generating %s ==\n", "initramfs-extra")
log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
start = time.Now()
initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
initfsExtra := initramfs.New([]filelist.FileLister{
hookfiles.New("/usr/share/mkinitfs/files-extra"),
hookfiles.New("/etc/mkinitfs/files-extra"),
hookscripts.New("/usr/share/mkinitfs/hooks-extra", "/hooks-extra"),
hookscripts.New("/etc/mkinitfs/hooks-extra", "/hooks-extra"),
modules.New([]string{}, "/usr/share/mkinitfs/modules-extra"),
modules.New([]string{}, "/etc/mkinitfs/modules-extra"),
osksdl.New(devinfo.MesaDriver),
})
initramfsExtraAr.AddItemsExclude(initfsExtra, initfs)
if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
log.Println(err)
log.Println("failed to generate: ", "initramfs-extra")
retCode = 1
return
}
misc.TimeFunc(start, "initramfs-extra")
// Final processing of initramfs / kernel is done by boot-deploy
if !disableBootDeploy {
if err := bootDeploy(workDir, *outDir, devinfo.UbootBoardname); err != nil {
log.Println(err)
log.Println("boot-deploy failed")
retCode = 1
return
}
}
}
func bootDeploy(workDir, outDir, ubootBoardname string) error {
log.Print("== Using boot-deploy to finalize/install files ==")
defer misc.TimeFunc(time.Now(), "boot-deploy")
bd := bootdeploy.New(workDir, outDir, ubootBoardname)
return bd.Run()
}

View File

@@ -1,156 +0,0 @@
mkinitfs(1) "mkinitfs"
# NAME
mkinitfs
# DESCRIPTION
mkinitfs is a simple, generic tool for generating an initramfs, primarily
developed for use in postmarketOS.
# CONCEPTS
mkinitfs is designed to generate two archives, "initramfs" and
"initramfs-extra"; however, it's possible to configure mkinitfs to run without
generating an initramfs-extra archive. mkinitfs is primarily configured through
the placement of files in specific directories, detailed below in the
*DIRECTORIES* section. *deviceinfo* files are also used to provide other
configuration options to mkinitfs; these are covered in the *DEVICEINFO*
section below.
mkinitfs does not provide an init script or any boot-time logic; its purpose
is purely to generate the archive(s). mkinitfs does call *boot-deploy* after
creating the archive(s), in order to install/deploy them and any other relevant
boot-related items onto the system.
Design goals of this project are:
- Support as many distros as possible
- Simplify configuration, while still giving multiple opportunities to set or override defaults
- Execute an external app to do any boot install/setup finalization
- One such app is here: https://gitlab.com/postmarketOS/boot-deploy
- But implementation can be anything, see the section on *BOOT-DEPLOY*
for more info
# DEVICEINFO
The canonical deviceinfo "specification" is at
https://wiki.postmarketos.org/wiki/Deviceinfo_reference
mkinitfs reads deviceinfo values from */usr/share/deviceinfo/deviceinfo* and
*/etc/deviceinfo*, in that order. The following variables
are *required* by mkinitfs:
- deviceinfo_initfs_compression
- deviceinfo_initfs_extra_compression
- deviceinfo_mesa_driver
- deviceinfo_modules_initfs
- deviceinfo_uboot_boardname
It is a design goal to keep the number of required variables from deviceinfo to
a bare minimum, and to require only variables that don't hold lists of things.
*NOTE*: When deviceinfo_initfs_extra_compression is set, make sure that the
necessary tools to extract the configured archive format are in the initramfs
archive.
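For illustration only (the values below are hypothetical, not recommended defaults), the required variables might look like this in a deviceinfo file:
```
deviceinfo_initfs_compression="zstd:default"
deviceinfo_initfs_extra_compression="zstd:best"
deviceinfo_mesa_driver="msm"
deviceinfo_modules_initfs="panel_mydevice touchscreen_mydevice"
deviceinfo_uboot_boardname=""
```
An empty *deviceinfo_uboot_boardname* is acceptable when no u-boot files need to be copied in before running boot-deploy.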
# DIRECTORIES
The following directories are used by mkinitfs to generate the initramfs and
initramfs-extra archives. Directories that end in *-extra* indicate directories
that are used for constructing the initramfs-extra archive, while those without
it are for constructing the initramfs archive.
Configuration under */usr/share/mkinitfs* is intended to be managed by
distributions, while configuration under */etc/mkinitfs* is for users to
create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, and then from */etc/mkinitfs*.
## /usr/share/mkinitfs/files, /etc/mkinitfs/files
## /usr/share/mkinitfs/files-extra, /etc/mkinitfs/files-extra
Files with the *.files* extension are read as a list of
files/directories. Each line is in the format:
```
<source path>:<destination path>
```
The source path is the location, at runtime, of the file or directory
which will be copied to the destination path within the initramfs
archive. Specifying a destination path, with *:<destination path>* is
optional. If it is omitted, then the source path will be used as the
destination path within the archive. The source and destination paths
are delimited by a *:* (colon). The destination path is ignored if the source
path is a glob that returns more than one file. This may change in the future.
[[ *Line in .files*
:< Comment
| */usr/share/bazz*
: File or directory */usr/share/bazz* would be added to the archive under */usr/share/bazz*
| */usr/share/bazz:/bazz*
: File or directory */usr/share/bazz* would be added to the archive under */bazz*
| */root/something/\**
: Everything under */root/something* would be added to the archive under */root/something*
| */etc/foo/\*/bazz:/foo*
: Anything that matches the glob will be installed under the source path in the archive. For example, */etc/foo/bar/bazz* would be installed at */etc/foo/bar/bazz* in the archive. The destination path is ignored.
It's possible to overwrite file/directory destinations from
configuration in */usr/share/mkinitfs* by specifying the same source
path(s) under the relevant directory in */etc/mkinitfs*, and changing
the destination path.
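For example (the paths are hypothetical): if a list under */usr/share/mkinitfs/files* contains the line */usr/share/bazz:/bazz*, a list under */etc/mkinitfs/files* can redirect it by repeating the source path with a different destination:
```
/usr/share/bazz:/other/bazz
```
Since */etc/mkinitfs* is read after */usr/share/mkinitfs*, the later entry wins.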
## /usr/share/mkinitfs/hooks, /etc/mkinitfs/hooks
## /usr/share/mkinitfs/hooks-extra, /etc/mkinitfs/hooks-extra
Any files listed under these directories are copied as-is into the
relevant archives. Hooks are generally script files, but how they are
treated in the initramfs is entirely up to whatever init script is run
there on boot.
Hooks are installed in the initramfs under the */hooks* directory, and
under */hooks-extra* for the initramfs-extra.
## /usr/share/mkinitfs/modules, /etc/mkinitfs/modules
## /usr/share/mkinitfs/modules-extra, /etc/mkinitfs/modules-extra
Files with the *.modules* extension in these directories are lists of
kernel modules to include in the initramfs. Individual modules and
directories can be listed in the files here. Globbing is also supported.
Modules are installed in the initramfs archive under the same path they
exist on the system where mkinitfs is executed.
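A hypothetical *.modules* list mixing the supported forms (module name, directory, directory glob; the names below are examples only, and directory entries are written with a trailing slash in this sketch) could look like:
```
dwc_wdt
kernel/drivers/usb/host/
kernel/drivers/iio/*/
```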
## /usr/share/mkinitfs/dirs, /etc/mkinitfs/dirs
Files with the *.dirs* extension in these directories are lists of
directories to create within the initramfs. There is no *-extra* variant,
since directories are of negligible size.
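A hypothetical *.dirs* list is simply one directory per line, for example:
```
/dev
/proc
/sys
/run
```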
# BOOT-DEPLOY
After generating archives, mkinitfs will execute *boot-deploy*, using *$PATH* to
search for the app. The following commandline options are passed to it:
*-i* <initramfs filename>
Currently this is hardcoded to be "initramfs"
*-k* <kernel filename>
*-d* <work directory>
Path to the directory containing the build artifacts from mkinitfs.
*-o* <destination directory>
Path to the directory that boot-deploy should use as its root when
installing files.
*initramfs-extra*
This string is the filename of the initramfs-extra archive.
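Putting the options together, a typical invocation (the kernel file name and work directory here are hypothetical examples) looks like:
```
boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/mkinitfs1234 -o /boot initramfs-extra
```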
# AUTHORS
*Clayton Craft* <clayton@craftyguy.net>

go.mod

@@ -1,11 +1,10 @@
module gitlab.com/postmarketOS/postmarketos-mkinitfs
go 1.20
go 1.16
require (
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
github.com/klauspost/compress v1.15.12
github.com/pierrec/lz4/v4 v4.1.17
github.com/ulikunitz/xz v0.5.10
github.com/klauspost/compress v1.13.3 // indirect
github.com/klauspost/pgzip v1.2.5
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
)

go.sum

@@ -1,10 +1,9 @@
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=
github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/klauspost/compress v1.13.3 h1:BtAvtV1+h0YwSVwWoYXMREPpYu9VzTJ9QDI1TEg/iQQ=
github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@@ -1,466 +0,0 @@
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package archive
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"syscall"
"github.com/cavaliercoder/go-cpio"
"github.com/klauspost/compress/zstd"
"github.com/pierrec/lz4/v4"
"github.com/ulikunitz/xz"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
)
type CompressFormat string
const (
FormatGzip CompressFormat = "gzip"
FormatLzma CompressFormat = "lzma"
FormatLz4 CompressFormat = "lz4"
FormatZstd CompressFormat = "zstd"
FormatNone CompressFormat = "none"
)
type CompressLevel string
const (
// Mapped to the "default" level for the given format
LevelDefault CompressLevel = "default"
// Maps to the fastest compression level for the given format
LevelFast CompressLevel = "fast"
// Maps to the best compression level for the given format
LevelBest CompressLevel = "best"
)
type Archive struct {
cpioWriter *cpio.Writer
buf *bytes.Buffer
compress_format CompressFormat
compress_level CompressLevel
items archiveItems
}
func New(format CompressFormat, level CompressLevel) *Archive {
buf := new(bytes.Buffer)
archive := &Archive{
cpioWriter: cpio.NewWriter(buf),
buf: buf,
compress_format: format,
compress_level: level,
}
return archive
}
type archiveItem struct {
header *cpio.Header
sourcePath string
}
type archiveItems struct {
items []archiveItem
sync.RWMutex
}
// ExtractFormatLevel parses the given string in the format format[:level],
// where :level is one of CompressLevel consts. If level is omitted from the
// string, or if it can't be parsed, the level is set to the default level for
// the given format. If format is unknown, gzip is selected. This function is
// designed to always return something usable within this package.
func ExtractFormatLevel(s string) (format CompressFormat, level CompressLevel) {
f, l, found := strings.Cut(s, ":")
if !found {
l = "default"
}
level = CompressLevel(strings.ToLower(l))
format = CompressFormat(strings.ToLower(f))
switch level {
case LevelBest:
case LevelDefault:
case LevelFast:
default:
log.Print("Unknown or no compression level set, using default")
level = LevelDefault
}
switch format {
case FormatGzip:
case FormatLzma:
log.Println("Format lzma doesn't support a compression level, using default settings")
level = LevelDefault
case FormatLz4:
case FormatNone:
case FormatZstd:
default:
log.Print("Unknown or no compression format set, using gzip")
format = FormatGzip
}
return
}
// Adds the given item to the archiveItems, only if it doesn't already exist in
// the list. The items are kept sorted in ascending order.
func (a *archiveItems) add(item archiveItem) {
a.Lock()
defer a.Unlock()
if len(a.items) < 1 {
// empty list
a.items = append(a.items, item)
return
}
// find existing item, or index of where new item should go
i := sort.Search(len(a.items), func(i int) bool {
return strings.Compare(item.header.Name, a.items[i].header.Name) <= 0
})
if i >= len(a.items) {
// doesn't exist in list, but would be at the very end
a.items = append(a.items, item)
return
}
if strings.Compare(a.items[i].header.Name, item.header.Name) == 0 {
// already in list
return
}
// grow list by 1, shift right at index, and insert new string at index
a.items = append(a.items, archiveItem{})
copy(a.items[i+1:], a.items[i:])
a.items[i] = item
}
// iterate through items and send each one over the returned channel
func (a *archiveItems) IterItems() <-chan archiveItem {
ch := make(chan archiveItem)
go func() {
a.RLock()
defer a.RUnlock()
for _, item := range a.items {
ch <- item
}
close(ch)
}()
return ch
}
func (archive *Archive) Write(path string, mode os.FileMode) error {
if err := archive.writeCpio(); err != nil {
return err
}
if err := archive.cpioWriter.Close(); err != nil {
return fmt.Errorf("archive.Write: error closing archive: %w", err)
}
// Write archive to path
if err := archive.writeCompressed(path, mode); err != nil {
return fmt.Errorf("unable to write archive to location %q: %w", path, err)
}
if err := os.Chmod(path, mode); err != nil {
return fmt.Errorf("unable to chmod %q to %s: %w", path, mode, err)
}
return nil
}
// AddItems adds the files listed by the given FileLister to the archive.
// Internally this just calls AddItem on each source path, dest path pair in
// the list.
func (archive *Archive) AddItems(flister filelist.FileLister) error {
list, err := flister.List()
if err != nil {
return err
}
for i := range list.IterItems() {
if err := archive.AddItem(i.Source, i.Dest); err != nil {
return err
}
}
return nil
}
// AddItemsExclude is like AddItems, but takes a second FileLister that lists
// items that should not be added to the archive from the first FileLister
func (archive *Archive) AddItemsExclude(flister filelist.FileLister, exclude filelist.FileLister) error {
list, err := flister.List()
if err != nil {
return err
}
excludeList, err := exclude.List()
if err != nil {
return err
}
for i := range list.IterItems() {
dest, found := excludeList.Get(i.Source)
if found {
if i.Dest != dest {
found = false
}
}
if !found {
if err := archive.AddItem(i.Source, i.Dest); err != nil {
return err
}
}
}
return nil
}
// Adds the given file or directory at "source" to the archive at "dest"
func (archive *Archive) AddItem(source string, dest string) error {
sourceStat, err := os.Lstat(source)
if err != nil {
e, ok := err.(*os.PathError)
if e.Err == syscall.ENOENT && ok {
// doesn't exist in current filesystem, assume it's a new directory
return archive.addDir(dest)
}
return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
}
if sourceStat.Mode()&os.ModeDir != 0 {
return archive.addDir(dest)
}
return archive.addFile(source, dest)
}
func (archive *Archive) addFile(source string, dest string) error {
if err := archive.addDir(filepath.Dir(dest)); err != nil {
return err
}
sourceStat, err := os.Lstat(source)
if err != nil {
log.Print("addFile: failed to stat file: ", source)
return err
}
// Symlink: write symlink to archive then set 'file' to link target
if sourceStat.Mode()&os.ModeSymlink != 0 {
// log.Printf("File %q is a symlink", file)
target, err := os.Readlink(source)
if err != nil {
log.Print("addFile: failed to get symlink target: ", source)
return err
}
destFilename := strings.TrimPrefix(dest, "/")
archive.items.add(archiveItem{
sourcePath: source,
header: &cpio.Header{
Name: destFilename,
Linkname: target,
Mode: 0644 | cpio.ModeSymlink,
Size: int64(len(target)),
// Checksum: 1,
},
})
if filepath.Dir(target) == "." {
target = filepath.Join(filepath.Dir(source), target)
}
// make sure target is an absolute path
if !filepath.IsAbs(target) {
target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
if err != nil {
return err
}
}
err = archive.addFile(target, target)
return err
}
destFilename := strings.TrimPrefix(dest, "/")
archive.items.add(archiveItem{
sourcePath: source,
header: &cpio.Header{
Name: destFilename,
Mode: cpio.FileMode(sourceStat.Mode().Perm()),
Size: sourceStat.Size(),
// Checksum: 1,
},
})
return nil
}
func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err error) {
var compressor io.WriteCloser
defer func() {
e := compressor.Close()
if e != nil && err == nil {
err = e
}
}()
fd, err := os.Create(path)
if err != nil {
return err
}
// Note: fd.Close omitted since it'll be closed in "compressor"
switch archive.compress_format {
case FormatGzip:
level := gzip.DefaultCompression
switch archive.compress_level {
case LevelBest:
level = gzip.BestCompression
case LevelFast:
level = gzip.BestSpeed
}
compressor, err = gzip.NewWriterLevel(fd, level)
if err != nil {
return err
}
case FormatLzma:
compressor, err = xz.NewWriter(fd)
if err != nil {
return err
}
case FormatLz4:
// The default compression for the lz4 library is Fast, and
// they don't define a Default level otherwise
level := lz4.Fast
switch archive.compress_level {
case LevelBest:
level = lz4.Level9
case LevelFast:
level = lz4.Fast
}
var writer = lz4.NewWriter(fd)
err = writer.Apply(lz4.LegacyOption(true), lz4.CompressionLevelOption(level))
if err != nil {
return err
}
compressor = writer
case FormatNone:
compressor = fd
case FormatZstd:
level := zstd.SpeedDefault
switch archive.compress_level {
case LevelBest:
level = zstd.SpeedBestCompression
case LevelFast:
level = zstd.SpeedFastest
}
compressor, err = zstd.NewWriter(fd, zstd.WithEncoderLevel(level))
if err != nil {
return err
}
default:
log.Print("Unknown or no compression format set, using gzip")
compressor = gzip.NewWriter(fd)
}
if _, err = io.Copy(compressor, archive.buf); err != nil {
return err
}
// call fsync just to be sure
if err := fd.Sync(); err != nil {
return err
}
if err := os.Chmod(path, mode); err != nil {
return err
}
return nil
}
func (archive *Archive) writeCpio() error {
// having a transient function for actually adding files to the archive
// allows the deferred fd.close to run after every copy and prevent having
// tons of open file handles until the copying is all done
copyToArchive := func(source string, header *cpio.Header) error {
if err := archive.cpioWriter.WriteHeader(header); err != nil {
return fmt.Errorf("archive.writeCpio: unable to write header: %w", err)
}
// don't copy actual dirs into the archive, writing the header is enough
if !header.Mode.IsDir() {
if header.Mode.IsRegular() {
fd, err := os.Open(source)
if err != nil {
return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
}
defer fd.Close()
if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
}
} else if header.Linkname != "" {
// the contents of a symlink entry is just the link target name
if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
}
} else {
return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
}
}
return nil
}
for i := range archive.items.IterItems() {
if err := copyToArchive(i.sourcePath, i.header); err != nil {
return err
}
}
return nil
}
func (archive *Archive) addDir(dir string) error {
if dir == "/" {
dir = "."
}
subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
for i, subdir := range subdirs {
path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
archive.items.add(archiveItem{
sourcePath: path,
header: &cpio.Header{
Name: path,
Mode: cpio.ModeDir | 0755,
},
})
}
return nil
}

View File

@@ -1,278 +0,0 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package archive
import (
"reflect"
"testing"
"github.com/cavaliercoder/go-cpio"
)
func TestArchiveItemsAdd(t *testing.T) {
subtests := []struct {
name string
inItems []archiveItem
inItem archiveItem
expected []archiveItem
}{
{
name: "empty list",
inItems: []archiveItem{},
inItem: archiveItem{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
expected: []archiveItem{
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
},
{
name: "already exists",
inItems: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
inItem: archiveItem{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
expected: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
},
{
name: "add new",
inItems: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
{
sourcePath: "/foo/bar1",
header: &cpio.Header{Name: "/foo/bar1"},
},
},
inItem: archiveItem{
sourcePath: "/foo/bar0",
header: &cpio.Header{Name: "/foo/bar0"},
},
expected: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
{
sourcePath: "/foo/bar0",
header: &cpio.Header{Name: "/foo/bar0"},
},
{
sourcePath: "/foo/bar1",
header: &cpio.Header{Name: "/foo/bar1"},
},
},
},
{
name: "add new at beginning",
inItems: []archiveItem{
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
inItem: archiveItem{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
expected: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/foo/bar",
header: &cpio.Header{Name: "/foo/bar"},
},
},
},
{
name: "add new at end",
inItems: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
},
inItem: archiveItem{
sourcePath: "/zzz/bazz",
header: &cpio.Header{Name: "/zzz/bazz"},
},
expected: []archiveItem{
{
sourcePath: "/bazz/bar",
header: &cpio.Header{Name: "/bazz/bar"},
},
{
sourcePath: "/foo",
header: &cpio.Header{Name: "/foo"},
},
{
sourcePath: "/zzz/bazz",
header: &cpio.Header{Name: "/zzz/bazz"},
},
},
},
}
for _, st := range subtests {
t.Run(st.name, func(t *testing.T) {
a := archiveItems{items: st.inItems}
a.add(st.inItem)
if !reflect.DeepEqual(st.expected, a.items) {
t.Fatal("expected:", st.expected, " got: ", a.items)
}
})
}
}
func TestExtractFormatLevel(t *testing.T) {
tests := []struct {
name string
in string
expectedFormat CompressFormat
expectedLevel CompressLevel
}{
{
name: "gzip, default level",
in: "gzip:default",
expectedFormat: FormatGzip,
expectedLevel: LevelDefault,
},
{
name: "unknown format, level 12",
in: "pear:12",
expectedFormat: FormatGzip,
expectedLevel: LevelDefault,
},
{
name: "zstd, level not given",
in: "zstd",
expectedFormat: FormatZstd,
expectedLevel: LevelDefault,
},
{
name: "zstd, invalid level 'fast:'",
in: "zstd:fast:",
expectedFormat: FormatZstd,
expectedLevel: LevelDefault,
},
{
name: "zstd, best",
in: "zstd:best",
expectedFormat: FormatZstd,
expectedLevel: LevelBest,
},
{
name: "zstd, level empty :",
in: "zstd:",
expectedFormat: FormatZstd,
expectedLevel: LevelDefault,
},
{
name: "gzip, best",
in: "gzip:best",
expectedFormat: FormatGzip,
expectedLevel: LevelBest,
},
{
name: "<empty>, <empty>",
in: "",
expectedFormat: FormatGzip,
expectedLevel: LevelDefault,
},
{
name: "lzma, fast",
in: "lzma:fast",
expectedFormat: FormatLzma,
expectedLevel: LevelDefault,
},
{
name: "lz4, fast",
in: "lz4:fast",
expectedFormat: FormatLz4,
expectedLevel: LevelFast,
},
{
name: "none",
in: "none",
expectedFormat: FormatNone,
expectedLevel: LevelDefault,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
format, level := ExtractFormatLevel(test.in)
if format != test.expectedFormat {
t.Fatal("format expected: ", test.expectedFormat, " got: ", format)
}
if level != test.expectedLevel {
t.Fatal("level expected: ", test.expectedLevel, " got: ", level)
}
})
}
}

View File

@@ -1,153 +0,0 @@
package bootdeploy
import (
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
)
type BootDeploy struct {
inDir string
outDir string
ubootBoardname string
}
// New returns a new BootDeploy, which then runs:
//
// boot-deploy -d indir -o outDir
//
// ubootBoardname is used for copying in some u-boot files prior to running
// boot-deploy. This is optional, passing an empty string is ok if this is not
// needed.
func New(inDir, outDir, ubootBoardname string) *BootDeploy {
return &BootDeploy{
inDir: inDir,
outDir: outDir,
ubootBoardname: ubootBoardname,
}
}
func (b *BootDeploy) Run() error {
if err := copyUbootFiles(b.inDir, b.ubootBoardname); errors.Is(err, os.ErrNotExist) {
log.Println("u-boot files copying skipped: ", err)
} else {
if err != nil {
log.Fatal("copyUbootFiles: ", err)
}
}
return bootDeploy(b.inDir, b.outDir)
}
func bootDeploy(workDir string, outDir string) error {
// boot-deploy expects the kernel to be in the same dir as initramfs.
// Assume that the kernel is in the output dir...
kernels, _ := filepath.Glob(filepath.Join(outDir, "vmlinuz*"))
if len(kernels) == 0 {
return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
}
// Pick a kernel that does not have suffixes added by boot-deploy
var kernFile string
for _, f := range kernels {
if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
continue
}
kernFile = f
break
}
kernFd, err := os.Open(kernFile)
if err != nil {
return err
}
defer kernFd.Close()
kernFilename := path.Base(kernFile)
kernFileCopy, err := os.Create(filepath.Join(workDir, kernFilename))
if err != nil {
return err
}
if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
return err
}
if err := kernFileCopy.Close(); err != nil {
return fmt.Errorf("error closing %s: %w", kernFilename, err)
}
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
cmd := exec.Command("boot-deploy",
"-i", "initramfs",
"-k", kernFilename,
"-d", workDir,
"-o", outDir,
"initramfs-extra")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return err
}
return nil
}
// Copy copies the file at srcFile path to a new file at dstFile path
func copy(srcFile, dstFile string) error {
out, err := os.Create(dstFile)
if err != nil {
return err
}
defer func() {
errClose := out.Close()
if err == nil {
err = errClose
}
}()
in, err := os.Open(srcFile)
if err != nil {
return err
}
defer in.Close()
_, err = io.Copy(out, in)
if err != nil {
return err
}
return nil
}
// copyUbootFiles uses deviceinfo_uboot_boardname to copy u-boot files required
// for running boot-deploy
func copyUbootFiles(path, ubootBoardname string) error {
if ubootBoardname == "" {
return nil
}
srcDir := filepath.Join("/usr/share/u-boot", ubootBoardname)
entries, err := os.ReadDir(srcDir)
if err != nil {
return err
}
for _, entry := range entries {
sourcePath := filepath.Join(srcDir, entry.Name())
destPath := filepath.Join(path, entry.Name())
if err := copy(sourcePath, destPath); err != nil {
return err
}
}
return nil
}

View File

@@ -1,65 +0,0 @@
package filelist
import "sync"
type FileLister interface {
List() (*FileList, error)
}
type File struct {
Source string
Dest string
}
type FileList struct {
m map[string]string
sync.RWMutex
}
func NewFileList() *FileList {
return &FileList{
m: make(map[string]string),
}
}
func (f *FileList) Add(src string, dest string) {
f.Lock()
defer f.Unlock()
f.m[src] = dest
}
func (f *FileList) Get(src string) (string, bool) {
f.RLock()
defer f.RUnlock()
dest, found := f.m[src]
return dest, found
}
// Import copies in the contents of src. If a source path already exists when
// importing, then the destination path is updated with the new value.
func (f *FileList) Import(src *FileList) {
for i := range src.IterItems() {
f.Add(i.Source, i.Dest)
}
}
// iterate through the list and send each one as a new File over the
// returned channel
func (f *FileList) IterItems() <-chan File {
ch := make(chan File)
go func() {
f.RLock()
defer f.RUnlock()
for src, dest := range f.m {
ch <- File{
Source: src,
Dest: dest,
}
}
close(ch)
}()
return ch
}

View File

@@ -1,51 +0,0 @@
package hookdirs
import (
"bufio"
"fmt"
"log"
"os"
"path/filepath"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
)
type HookDirs struct {
path string
}
// New returns a new HookDirs that will use the given path to provide a list
// of directories to create.
func New(path string) *HookDirs {
return &HookDirs{
path: path,
}
}
func (h *HookDirs) List() (*filelist.FileList, error) {
log.Printf("- Searching for directories specified in %s", h.path)
files := filelist.NewFileList()
fileInfo, err := os.ReadDir(h.path)
if err != nil {
log.Println("-- Unable to find dir, skipping...")
return files, nil
}
for _, file := range fileInfo {
path := filepath.Join(h.path, file.Name())
f, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("getHookDirs: unable to open hook file: %w", err)
}
defer f.Close()
log.Printf("-- Creating directories from: %s\n", path)
s := bufio.NewScanner(f)
for s.Scan() {
dir := s.Text()
files.Add(dir, dir)
}
}
return files, nil
}

View File

@@ -1,83 +0,0 @@
package hookfiles
import (
"bufio"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
)
type HookFiles struct {
filePath string
}
// New returns a new HookFiles that will use the given path to provide a list
// of files + any binary dependencies they might have.
func New(filePath string) *HookFiles {
return &HookFiles{
filePath: filePath,
}
}
func (h *HookFiles) List() (*filelist.FileList, error) {
log.Printf("- Searching for file lists from %s", h.filePath)
files := filelist.NewFileList()
fileInfo, err := os.ReadDir(h.filePath)
if err != nil {
log.Println("-- Unable to find dir, skipping...")
return files, nil
}
for _, file := range fileInfo {
path := filepath.Join(h.filePath, file.Name())
f, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("getHookFiles: unable to open hook file: %w", err)
}
defer f.Close()
log.Printf("-- Including files from: %s\n", path)
if list, err := slurpFiles(f); err != nil {
return nil, fmt.Errorf("hookfiles: unable to process hook file %q: %w", path, err)
} else {
files.Import(list)
}
}
return files, nil
}
func slurpFiles(fd io.Reader) (*filelist.FileList, error) {
files := filelist.NewFileList()
s := bufio.NewScanner(fd)
for s.Scan() {
src, dest, has_dest := strings.Cut(s.Text(), ":")
fFiles, err := misc.GetFiles([]string{src}, true)
if err != nil {
return nil, fmt.Errorf("unable to add %q: %w", src, err)
}
// loop over all returned files from GetFile
for _, file := range fFiles {
if !has_dest {
files.Add(file, file)
} else if len(fFiles) > 1 {
// Don't support specifying dest if src was a glob
// NOTE: this could be supported later...
files.Add(file, file)
} else {
// dest path specified, and only 1 file
files.Add(file, dest)
}
}
}
return files, s.Err()
}

View File

@@ -1,42 +0,0 @@
package hookscripts
import (
"log"
"os"
"path/filepath"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
)
type HookScripts struct {
destPath string
scriptsDir string
}
// New returns a new HookScripts that will use the given path to provide a list
// of script files. The destination for each script is set to destPath, using
// the original file name.
func New(scriptsDir string, destPath string) *HookScripts {
return &HookScripts{
destPath: destPath,
scriptsDir: scriptsDir,
}
}
func (h *HookScripts) List() (*filelist.FileList, error) {
log.Printf("- Searching for hook scripts from %s", h.scriptsDir)
files := filelist.NewFileList()
fileInfo, err := os.ReadDir(h.scriptsDir)
if err != nil {
log.Println("-- Unable to find dir, skipping...")
return files, nil
}
for _, file := range fileInfo {
path := filepath.Join(h.scriptsDir, file.Name())
log.Printf("-- Including script: %s\n", path)
files.Add(path, filepath.Join(h.destPath, file.Name()))
}
return files, nil
}

View File

@@ -1,38 +0,0 @@
package initramfs
import (
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
)
// Initramfs allows building arbitrarily complex lists of features, by slurping
// up types that implement FileLister (which includes this type! yippee) and
// combining the output from them.
type Initramfs struct {
features []filelist.FileLister
files *filelist.FileList
}
// New returns a new Initramfs that generates a list of files based on the given
// list of FileListers.
func New(features []filelist.FileLister) *Initramfs {
return &Initramfs{
features: features,
}
}
func (i *Initramfs) List() (*filelist.FileList, error) {
if i.files != nil {
return i.files, nil
}
i.files = filelist.NewFileList()
for _, f := range i.features {
list, err := f.List()
if err != nil {
return nil, err
}
i.files.Import(list)
}
return i.files, nil
}

View File

@@ -1,219 +0,0 @@
package modules
import (
"bufio"
"fmt"
"io"
"log"
"os"
"path/filepath"
"regexp"
"strings"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
)
type Modules struct {
modulesListPath string
modulesList []string
}
// New returns a new Modules that will use the given module list and module
// list directory to provide a list of kernel module files.
func New(modulesList []string, modulesListPath string) *Modules {
return &Modules{
modulesList: modulesList,
modulesListPath: modulesListPath,
}
}
func (m *Modules) List() (*filelist.FileList, error) {
kernVer, err := osutil.GetKernelVersion()
if err != nil {
return nil, err
}
files := filelist.NewFileList()
modDir := filepath.Join("/lib/modules", kernVer)
if exists, err := misc.Exists(modDir); !exists {
// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
return files, nil
} else if err != nil {
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", modDir, err)
}
// modules.* required by modprobe
modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
for _, file := range modprobeFiles {
files.Add(file, file)
}
// slurp up given list of modules
if len(m.modulesList) > 0 {
log.Printf("-- Including kernel modules from deviceinfo")
for _, module := range m.modulesList {
if modFilelist, err := getModule(module, modDir); err != nil {
return nil, fmt.Errorf("unable to get modules from deviceinfo: %w", err)
} else {
for _, file := range modFilelist {
files.Add(file, file)
}
}
}
}
// slurp up modules from lists in modulesListPath
log.Printf("- Searching for kernel modules from %s", m.modulesListPath)
fileInfo, err := os.ReadDir(m.modulesListPath)
if err != nil {
return files, nil
}
for _, file := range fileInfo {
path := filepath.Join(m.modulesListPath, file.Name())
f, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("unable to open module list file %q: %w", path, err)
}
defer f.Close()
log.Printf("-- Including modules from: %s\n", path)
if list, err := slurpModules(f, modDir); err != nil {
return nil, fmt.Errorf("unable to process module list file %q: %w", path, err)
} else {
files.Import(list)
}
}
return files, nil
}
func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
files := filelist.NewFileList()
s := bufio.NewScanner(fd)
for s.Scan() {
line := s.Text()
dir, file := filepath.Split(line)
if file == "" {
// item is a directory
dir = filepath.Join(modDir, dir)
dirs, _ := filepath.Glob(dir)
for _, d := range dirs {
if modFilelist, err := getModulesInDir(d); err != nil {
return nil, fmt.Errorf("unable to get modules dir %q: %w", d, err)
} else {
for _, file := range modFilelist {
files.Add(file, file)
}
}
}
} else if dir == "" {
// item is a module name
if modFilelist, err := getModule(s.Text(), modDir); err != nil {
return nil, fmt.Errorf("unable to get module file %q: %w", s.Text(), err)
} else {
for _, file := range modFilelist {
files.Add(file, file)
}
}
} else {
log.Printf("Unknown module entry: %q", line)
}
}
return files, s.Err()
}
func getModulesInDir(modPath string) (files []string, err error) {
err = filepath.Walk(modPath, func(path string, _ os.FileInfo, err error) error {
if err != nil {
// Unable to walk path
return err
}
if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
return nil
}
files = append(files, path)
return nil
})
if err != nil {
return nil, err
}
return
}
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
// file and all of its dependencies.
// Note: it's not necessarily fatal if the module is not found, since it may
// have been built into the kernel
func getModule(modName string, modDir string) (files []string, err error) {
modDep := filepath.Join(modDir, "modules.dep")
if exists, err := misc.Exists(modDep); !exists {
return nil, fmt.Errorf("kernel module.dep not found: %s", modDir)
} else if err != nil {
return nil, fmt.Errorf("received unexpected error when getting module.dep status: %w", err)
}
fd, err := os.Open(modDep)
if err != nil {
return nil, fmt.Errorf("unable to open modules.dep: %w", err)
}
defer fd.Close()
deps, err := getModuleDeps(modName, fd)
if err != nil {
return nil, err
}
for _, dep := range deps {
p := filepath.Join(modDir, dep)
if exists, err := misc.Exists(p); !exists {
return nil, fmt.Errorf("tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p)
} else if err != nil {
return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", p, err)
}
files = append(files, p)
}
return
}
// Get the module and its dependencies as listed in the given modules.dep io.Reader
func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
var deps []string
// split the module name on - and/or _, build a regex for matching
splitRe := regexp.MustCompile("[-_]+")
modNameReStr := splitRe.ReplaceAllString(modName, "[-_]+")
re := regexp.MustCompile("^" + modNameReStr + "$")
s := bufio.NewScanner(modulesDep)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) == 0 {
continue
}
fields[0] = strings.TrimSuffix(fields[0], ":")
found := re.FindAll([]byte(filepath.Base(stripExts(fields[0]))), -1)
if len(found) > 0 {
deps = append(deps, fields...)
break
}
}
if err := s.Err(); err != nil {
log.Print("Unable to get module + dependencies: ", modName)
return deps, err
}
return deps, nil
}
func stripExts(file string) string {
return strings.Split(file, ".")[0]
}

View File

@@ -1,158 +0,0 @@
package osksdl
import (
"bufio"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
)
type OskSdl struct {
mesaDriver string
}
// New returns a new OskSdl that will use the given Mesa driver name to provide
// a list of files needed to support osk-sdl in the initramfs.
func New(mesaDriverName string) *OskSdl {
return &OskSdl{
mesaDriver: mesaDriverName,
}
}
// Get a list of files and their dependencies related to supporting rootfs full
// disk encryption
func (s *OskSdl) List() (*filelist.FileList, error) {
files := filelist.NewFileList()
if exists, err := misc.Exists("/usr/bin/osk-sdl"); !exists {
return files, nil
} else if err != nil {
return files, fmt.Errorf("received unexpected error when getting status for %q: %w", "/usr/bin/osk-sdl", err)
}
log.Println("- Including osk-sdl support")
confFiles := []string{
"/etc/osk.conf",
"/etc/ts.conf",
"/etc/pointercal",
"/etc/fb.modes",
"/etc/directfbrc",
}
confFileList, err := misc.GetFiles(confFiles, false)
if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
}
for _, file := range confFileList {
files.Add(file, file)
}
// osk-sdl
oskFiles := []string{
"/usr/bin/osk-sdl",
"/sbin/cryptsetup",
"/usr/lib/libGL.so.1",
}
if oskFileList, err := misc.GetFiles(oskFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
for _, file := range oskFileList {
files.Add(file, file)
}
}
fontFile, err := getOskConfFontPath("/etc/osk.conf")
if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file %q: %w", fontFile, err)
}
files.Add(fontFile, fontFile)
// Directfb
dfbFiles := []string{}
err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
if filepath.Ext(path) == ".so" {
dfbFiles = append(dfbFiles, path)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file %w", err)
}
if dfbFileList, err := misc.GetFiles(dfbFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
for _, file := range dfbFileList {
files.Add(file, file)
}
}
// tslib
tslibFiles := []string{}
err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
if filepath.Ext(path) == ".so" {
tslibFiles = append(tslibFiles, path)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add file: %w", err)
}
libts, _ := filepath.Glob("/usr/lib/libts*")
tslibFiles = append(tslibFiles, libts...)
if tslibFileList, err := misc.GetFiles(tslibFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
for _, file := range tslibFileList {
files.Add(file, file)
}
}
// mesa hw accel
if s.mesaDriver != "" {
mesaFiles := []string{
"/usr/lib/libEGL.so.1",
"/usr/lib/libGLESv2.so.2",
"/usr/lib/libgbm.so.1",
"/usr/lib/libudev.so.1",
"/usr/lib/xorg/modules/dri/" + s.mesaDriver + "_dri.so",
}
if mesaFileList, err := misc.GetFiles(mesaFiles, true); err != nil {
return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
} else {
for _, file := range mesaFileList {
files.Add(file, file)
}
}
}
return files, nil
}
func getOskConfFontPath(oskConfPath string) (string, error) {
var path string
f, err := os.Open(oskConfPath)
if err != nil {
return path, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
fields := strings.Fields(s.Text())
// "key = val" is 3 fields
if len(fields) > 2 && fields[0] == "keyboard-font" {
path = fields[2]
}
}
if exists, err := misc.Exists(path); !exists {
return path, fmt.Errorf("unable to find font: %s", path)
} else if err != nil {
return path, fmt.Errorf("received unexpected error when getting status for %q: %w", path, err)
}
return path, nil
}

View File

@@ -1,165 +0,0 @@
package misc
import (
"debug/elf"
"fmt"
"os"
"path/filepath"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
)
func GetFiles(list []string, required bool) (files []string, err error) {
for _, file := range list {
filelist, err := getFile(file, required)
if err != nil {
return nil, err
}
files = append(files, filelist...)
}
files = RemoveDuplicates(files)
return
}
func getFile(file string, required bool) (files []string, err error) {
// Expand glob expression
expanded, err := filepath.Glob(file)
if err != nil {
return
}
if len(expanded) > 0 && expanded[0] != file {
for _, path := range expanded {
if globFiles, err := getFile(path, required); err != nil {
return files, err
} else {
files = append(files, globFiles...)
}
}
return RemoveDuplicates(files), nil
}
fileInfo, err := os.Stat(file)
if err != nil {
if required {
return files, fmt.Errorf("getFile: failed to stat file %q: %w", file, err)
}
return files, nil
}
if fileInfo.IsDir() {
// Recurse over directory contents
err := filepath.Walk(file, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if f.IsDir() {
return nil
}
newFiles, err := getFile(path, required)
if err != nil {
return err
}
files = append(files, newFiles...)
return nil
})
if err != nil {
return files, err
}
} else {
files = append(files, file)
// get dependencies for binaries
if _, err := elf.Open(file); err == nil {
if binaryDepFiles, err := getBinaryDeps(file); err != nil {
return files, err
} else {
files = append(files, binaryDepFiles...)
}
}
}
files = RemoveDuplicates(files)
return
}
func getDeps(file string, parents map[string]struct{}) (files []string, err error) {
if _, found := parents[file]; found {
return
}
// get dependencies for binaries
fd, err := elf.Open(file)
if err != nil {
return nil, fmt.Errorf("getDeps: unable to open elf binary %q: %w", file, err)
}
libs, _ := fd.ImportedLibraries()
fd.Close()
files = append(files, file)
parents[file] = struct{}{}
if len(libs) == 0 {
return
}
// we don't recursively search these paths for performance reasons
libdirGlobs := []string{
"/usr/lib",
"/lib",
"/usr/lib/expect*",
}
for _, lib := range libs {
found := false
findDepLoop:
for _, libdirGlob := range libdirGlobs {
libdirs, _ := filepath.Glob(libdirGlob)
for _, libdir := range libdirs {
path := filepath.Join(libdir, lib)
if _, err := os.Stat(path); err == nil {
binaryDepFiles, err := getDeps(path, parents)
if err != nil {
return nil, err
}
files = append(files, binaryDepFiles...)
files = append(files, path)
found = true
break findDepLoop
}
}
}
if !found {
return nil, fmt.Errorf("getDeps: unable to locate dependency for %q: %s", file, lib)
}
}
return
}
// Recursively list all dependencies for a given ELF binary
func getBinaryDeps(file string) ([]string, error) {
// if file is a symlink, resolve dependencies for target
fileStat, err := os.Lstat(file)
if err != nil {
return nil, fmt.Errorf("getBinaryDeps: failed to stat file %q: %w", file, err)
}
// Symlink: write symlink to archive then set 'file' to link target
if fileStat.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(file)
if err != nil {
return nil, fmt.Errorf("getBinaryDeps: unable to read symlink %q: %w", file, err)
}
if !filepath.IsAbs(target) {
target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
if err != nil {
return nil, err
}
}
file = target
}
return getDeps(file, make(map[string]struct{}))
}

View File

@@ -1,65 +0,0 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package misc
import (
"errors"
"log"
"os"
"time"
)
// Merge the contents of "b" into "a", overwriting any previously existing keys
// in "a"
func Merge(a map[string]string, b map[string]string) {
for k, v := range b {
a[k] = v
}
}
// Removes duplicate entries from the given string slice and returns a slice
// with the unique values
func RemoveDuplicates(in []string) (out []string) {
// use a map to "remove" duplicates. the value in the map is totally
// irrelevant
outMap := make(map[string]bool)
for _, s := range in {
if ok := outMap[s]; !ok {
outMap[s] = true
}
}
out = make([]string, 0, len(outMap))
for k := range outMap {
out = append(out, k)
}
return
}
// Prints the execution time of a function. It is not meant to be very
// precise, but it is good enough to gauge rough run times.
// Meant to be called as:
//
// defer misc.TimeFunc(time.Now(), "foo")
func TimeFunc(start time.Time, name string) {
elapsed := time.Since(start)
log.Printf("%s completed in: %.2fs", name, elapsed.Seconds())
}
// Exists tests if the given file/dir exists or not. Returns any errors related
// to os.Stat if the type is *not* ErrNotExist. If an error is returned, then
// the value of the returned boolean cannot be trusted.
func Exists(file string) (bool, error) {
_, err := os.Stat(file)
if err == nil {
return true, nil
} else if errors.Is(err, os.ErrNotExist) {
// Don't return the error, the file doesn't exist which is OK
return false, nil
}
// Other errors from os.Stat returned here
return false, err
}

View File

@@ -1,125 +0,0 @@
// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package misc
import (
"reflect"
"sort"
"testing"
)
func TestMerge(t *testing.T) {
subtests := []struct {
name string
inA map[string]string
inB map[string]string
expected map[string]string
}{
{
name: "empty B",
inA: map[string]string{
"foo": "bar",
"banana": "airplane",
},
inB: map[string]string{},
expected: map[string]string{
"foo": "bar",
"banana": "airplane",
},
},
{
name: "empty A",
inA: map[string]string{},
inB: map[string]string{
"foo": "bar",
"banana": "airplane",
},
expected: map[string]string{
"foo": "bar",
"banana": "airplane",
},
},
{
name: "both populated, some duplicates",
inA: map[string]string{
"bar": "bazz",
"banana": "yellow",
"guava": "green",
},
inB: map[string]string{
"foo": "bar",
"banana": "airplane",
},
expected: map[string]string{
"foo": "bar",
"guava": "green",
"banana": "airplane",
"bar": "bazz",
},
},
}
for _, st := range subtests {
t.Run(st.name, func(t *testing.T) {
out := st.inA
Merge(out, st.inB)
if !reflect.DeepEqual(st.expected, out) {
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
}
})
}
}
func TestRemoveDuplicates(t *testing.T) {
subtests := []struct {
name string
in []string
expected []string
}{
{
name: "no duplicates",
in: []string{
"foo",
"bar",
"banana",
"airplane",
},
expected: []string{
"foo",
"bar",
"banana",
"airplane",
},
},
{
name: "all duplicates",
in: []string{
"foo",
"foo",
"foo",
"foo",
},
expected: []string{
"foo",
},
},
{
name: "empty",
in: []string{},
expected: []string{},
},
}
for _, st := range subtests {
t.Run(st.name, func(t *testing.T) {
// note: sorting to make comparison easier later
sort.Strings(st.expected)
out := RemoveDuplicates(st.in)
sort.Strings(out)
if !reflect.DeepEqual(st.expected, out) {
t.Fatalf("expected: %q, got: %q\n", st.expected, out)
}
})
}
}

694
main.go Normal file
View File

@@ -0,0 +1,694 @@
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package main
import (
"bufio"
"debug/elf"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/archive"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
)
func timeFunc(start time.Time, name string) {
elapsed := time.Since(start)
log.Printf("%s completed in: %s", name, elapsed)
}
func main() {
deviceinfoFile := "/etc/deviceinfo"
if !exists(deviceinfoFile) {
log.Print("NOTE: deviceinfo (from device package) not installed yet, " +
"not building the initramfs now (it should get built later " +
"automatically.)")
return
}
devinfo, err := deviceinfo.ReadDeviceinfo(deviceinfoFile)
if err != nil {
log.Fatal(err)
}
outDir := flag.String("d", "/boot", "Directory to output initfs(-extra) and other boot files")
flag.Parse()
defer timeFunc(time.Now(), "mkinitfs")
kernVer, err := getKernelVersion()
if err != nil {
log.Fatal(err)
}
// temporary working dir
workDir, err := ioutil.TempDir("", "mkinitfs")
if err != nil {
log.Fatal("Unable to create temporary work directory:", err)
}
defer os.RemoveAll(workDir)
log.Print("Generating for kernel version: ", kernVer)
log.Print("Output directory: ", *outDir)
if err := generateInitfs("initramfs", workDir, kernVer, devinfo); err != nil {
log.Fatal("generateInitfs: ", err)
}
if err := generateInitfsExtra("initramfs-extra", workDir, devinfo); err != nil {
log.Fatal("generateInitfsExtra: ", err)
}
// Final processing of initramfs / kernel is done by boot-deploy
if err := bootDeploy(workDir, *outDir); err != nil {
log.Fatal("bootDeploy: ", err)
}
}
func bootDeploy(workDir string, outDir string) error {
// boot-deploy expects the kernel to be in the same dir as initramfs.
// Assume that the kernel is in the output dir...
log.Print("== Using boot-deploy to finalize/install files ==")
kernels, _ := filepath.Glob(filepath.Join(outDir, "vmlinuz*"))
if len(kernels) == 0 {
return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
}
// Pick a kernel that does not have suffixes added by boot-deploy
var kernFile string
for _, f := range kernels {
if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
continue
}
kernFile = f
break
}
kernFd, err := os.Open(kernFile)
if err != nil {
return err
}
defer kernFd.Close()
kernFileCopy, err := os.Create(filepath.Join(workDir, "vmlinuz"))
if err != nil {
return err
}
if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
return err
}
kernFileCopy.Close()
// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
cmd := exec.Command("boot-deploy",
"-i", "initramfs",
"-k", "vmlinuz",
"-d", workDir,
"-o", outDir,
"initramfs-extra")
if !exists(cmd.Path) {
return errors.New("boot-deploy command not found")
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Print("'boot-deploy' command failed")
return err
}
return nil
}
func exists(file string) bool {
if _, err := os.Stat(file); err == nil {
return true
}
return false
}
func getHookFiles(filesdir string) misc.StringSet {
fileInfo, err := ioutil.ReadDir(filesdir)
if err != nil {
log.Fatal(err)
}
files := make(misc.StringSet)
for _, file := range fileInfo {
path := filepath.Join(filesdir, file.Name())
f, err := os.Open(path)
if err != nil {
log.Fatal(err)
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if !exists(s.Text()) {
log.Fatalf("Unable to find file %q required by %q", s.Text(), path)
}
files[s.Text()] = false
}
if err := s.Err(); err != nil {
log.Fatal(err)
}
}
return files
}
// Recursively list all dependencies for a given ELF binary
func getBinaryDeps(files misc.StringSet, file string) error {
// if file is a symlink, resolve dependencies for target
fileStat, err := os.Lstat(file)
if err != nil {
log.Print("getBinaryDeps: failed to stat file")
return err
}
// Symlink: write symlink to archive then set 'file' to link target
if fileStat.Mode()&os.ModeSymlink != 0 {
target, err := os.Readlink(file)
if err != nil {
log.Print("getBinaryDeps: unable to read symlink: ", file)
return err
}
if !filepath.IsAbs(target) {
target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
if err != nil {
return err
}
}
if err := getBinaryDeps(files, target); err != nil {
return err
}
return nil
}
// get dependencies for binaries
fd, err := elf.Open(file)
if err != nil {
log.Fatal(err)
}
libs, _ := fd.ImportedLibraries()
fd.Close()
files[file] = false
if len(libs) == 0 {
return nil
}
libdirs := []string{"/usr/lib", "/lib"}
for _, lib := range libs {
found := false
for _, libdir := range libdirs {
path := filepath.Join(libdir, lib)
if _, err := os.Stat(path); err == nil {
err := getBinaryDeps(files, path)
if err != nil {
return err
}
files[path] = false
found = true
break
}
}
if !found {
log.Fatalf("Unable to locate dependency for %q: %s", file, lib)
}
}
return nil
}
func getFiles(files misc.StringSet, newFiles misc.StringSet, required bool) error {
for file := range newFiles {
err := getFile(files, file, required)
if err != nil {
return err
}
}
return nil
}
func getFile(files misc.StringSet, file string, required bool) error {
if !exists(file) {
if required {
return errors.New("getFile: File does not exist :" + file)
}
return nil
}
files[file] = false
// get dependencies for binaries
if _, err := elf.Open(file); err != nil {
// file is not an elf, so don't resolve lib dependencies
return nil
}
err := getBinaryDeps(files, file)
if err != nil {
return err
}
return nil
}
func getOskConfFontPath(oskConfPath string) (string, error) {
var path string
f, err := os.Open(oskConfPath)
if err != nil {
return path, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
fields := strings.Fields(s.Text())
// "key = val" is 3 fields
if len(fields) > 2 && fields[0] == "keyboard-font" {
path = fields[2]
}
}
if !exists(path) {
return path, errors.New("Unable to find font: " + path)
}
return path, nil
}
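
As an aside, the "keyboard-font" lookup above reduces to whitespace splitting of a "key = value" line; a tiny sketch with a hypothetical osk.conf line (the font path is illustrative):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // hypothetical osk.conf line; "key = value" splits into three fields
    line := "keyboard-font = /usr/share/fonts/ttf-dejavu/DejaVuSans.ttf"
    fields := strings.Fields(line)
    if len(fields) > 2 && fields[0] == "keyboard-font" {
        fmt.Println("font:", fields[2])
    }
}
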
// Get a list of files and their dependencies related to supporting rootfs full
// disk (d)encryption
func getFdeFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
confFiles := misc.StringSet{
"/etc/osk.conf": false,
"/etc/ts.conf": false,
"/etc/pointercal": false,
"/etc/fb.modes": false,
"/etc/directfbrc": false,
}
// TODO: this shouldn't be false? though some files (pointercal) don't always exist...
if err := getFiles(files, confFiles, false); err != nil {
return err
}
// osk-sdl
oskFiles := misc.StringSet{
"/usr/bin/osk-sdl": false,
"/sbin/cryptsetup": false,
"/usr/lib/libGL.so.1": false}
if err := getFiles(files, oskFiles, true); err != nil {
return err
}
fontFile, err := getOskConfFontPath("/etc/osk.conf")
if err != nil {
return err
}
files[fontFile] = false
// Directfb
dfbFiles := make(misc.StringSet)
err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
if filepath.Ext(path) == ".so" {
dfbFiles[path] = false
}
return nil
})
if err != nil {
log.Print("getBinaryDeps: failed to stat file")
return err
}
if err := getFiles(files, dfbFiles, true); err != nil {
return err
}
// tslib
tslibFiles := make(misc.StringSet)
err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
if filepath.Ext(path) == ".so" {
tslibFiles[path] = false
}
return nil
})
if err != nil {
log.Print("getBinaryDeps: failed to stat file")
return err
}
libts, _ := filepath.Glob("/usr/lib/libts*")
for _, file := range libts {
tslibFiles[file] = false
}
if err = getFiles(files, tslibFiles, true); err != nil {
return err
}
// mesa hw accel
if devinfo.MesaDriver != "" {
mesaFiles := misc.StringSet{
"/usr/lib/libEGL.so.1": false,
"/usr/lib/libGLESv2.so.2": false,
"/usr/lib/libgbm.so.1": false,
"/usr/lib/libudev.so.1": false,
"/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so": false,
}
if err := getFiles(files, mesaFiles, true); err != nil {
return err
}
}
return nil
}
func getHookScripts(files misc.StringSet) {
scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh")
for _, script := range scripts {
files[script] = false
}
}
func getInitfsExtraFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
log.Println("== Generating initramfs extra ==")
binariesExtra := misc.StringSet{
"/lib/libz.so.1": false,
"/sbin/dmsetup": false,
"/sbin/e2fsck": false,
"/usr/sbin/parted": false,
"/usr/sbin/resize2fs": false,
"/usr/sbin/resize.f2fs": false,
}
log.Println("- Including extra binaries")
if err := getFiles(files, binariesExtra, true); err != nil {
return err
}
if exists("/usr/bin/osk-sdl") {
log.Println("- Including FDE support")
if err := getFdeFiles(files, devinfo); err != nil {
return err
}
} else {
log.Println("- *NOT* including FDE support")
}
return nil
}
func getInitfsFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
log.Println("== Generating initramfs ==")
requiredFiles := misc.StringSet{
"/bin/busybox": false,
"/bin/sh": false,
"/bin/busybox-extras": false,
"/usr/sbin/telnetd": false,
"/sbin/kpartx": false,
"/etc/deviceinfo": false,
}
// Hook files & scripts
if exists("/etc/postmarketos-mkinitfs/files") {
log.Println("- Including hook files")
hookFiles := getHookFiles("/etc/postmarketos-mkinitfs/files")
if err := getFiles(files, hookFiles, true); err != nil {
return err
}
}
log.Println("- Including hook scripts")
getHookScripts(files)
log.Println("- Including required binaries")
if err := getFiles(files, requiredFiles, true); err != nil {
return err
}
return nil
}
func getInitfsModules(files misc.StringSet, devinfo deviceinfo.DeviceInfo, kernelVer string) error {
log.Println("- Including kernel modules")
modDir := filepath.Join("/lib/modules", kernelVer)
if !exists(modDir) {
// dir /lib/modules/<kernel> does not exist if the kernel was built without module support, so just print a message
log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
return nil
}
// modules.* required by modprobe
modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
for _, file := range modprobeFiles {
files[file] = false
}
// module name (without extension), or directory (trailing slash is important! globs OK)
requiredModules := []string{
"loop",
"dm-crypt",
"kernel/fs/overlayfs/",
"kernel/crypto/",
"kernel/arch/*/crypto/",
}
for _, item := range requiredModules {
dir, file := filepath.Split(item)
if file == "" {
// item is a directory
dir = filepath.Join(modDir, dir)
dirs, _ := filepath.Glob(dir)
for _, d := range dirs {
if err := getModulesInDir(files, d); err != nil {
log.Print("Unable to get modules in dir: ", d)
return err
}
}
} else if dir == "" {
// item is a module name
if err := getModule(files, file, modDir); err != nil {
log.Print("Unable to get module: ", file)
return err
}
} else {
log.Printf("Unknown module entry: %q", item)
}
}
// deviceinfo modules
for _, module := range strings.Fields(devinfo.ModulesInitfs) {
if err := getModule(files, module, modDir); err != nil {
log.Print("Unable to get modules from deviceinfo")
return err
}
}
// /etc/postmarketos-mkinitfs/modules/*.modules
initfsModFiles, _ := filepath.Glob("/etc/postmarketos-mkinitfs/modules/*.modules")
for _, modFile := range initfsModFiles {
f, err := os.Open(modFile)
if err != nil {
log.Print("getInitfsModules: unable to open mkinitfs modules file: ", modFile)
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if err := getModule(files, s.Text(), modDir); err != nil {
log.Print("getInitfsModules: unable to get module file: ", s.Text())
return err
}
}
}
return nil
}
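
Note that the trailing-slash convention in requiredModules is resolved entirely by filepath.Split; a small sketch of that classification, reusing entries from the list above:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    for _, item := range []string{"dm-crypt", "kernel/fs/overlayfs/", "kernel/arch/*/crypto/"} {
        dir, file := filepath.Split(item)
        switch {
        case file == "":
            fmt.Printf("%q is a directory (glob allowed): %q\n", item, dir)
        case dir == "":
            fmt.Printf("%q is a module name\n", item)
        default:
            fmt.Printf("%q is an unknown entry\n", item)
        }
    }
}
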
func getKernelReleaseFile() (string, error) {
files, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
// only one kernel flavor supported
if len(files) != 1 {
return "", fmt.Errorf("only one kernel release/flavor is supported, found: %q", files)
}
return files[0], nil
}
func getKernelVersion() (string, error) {
var version string
releaseFile, err := getKernelReleaseFile()
if err != nil {
return version, err
}
contents, err := os.ReadFile(releaseFile)
if err != nil {
return version, err
}
return strings.TrimSpace(string(contents)), nil
}
func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
initfsArchive, err := archive.New()
if err != nil {
return err
}
requiredDirs := []string{
"/bin", "/sbin", "/usr/bin", "/usr/sbin", "/proc", "/sys",
"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
}
for _, dir := range requiredDirs {
initfsArchive.Dirs[dir] = false
}
if err := getInitfsFiles(initfsArchive.Files, devinfo); err != nil {
return err
}
if err := getInitfsModules(initfsArchive.Files, devinfo, kernVer); err != nil {
return err
}
if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
return err
}
// splash images
log.Println("- Including splash images")
splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz")
for _, file := range splashFiles {
// splash images are expected at /<file>
if err := initfsArchive.AddFile(file, filepath.Join("/", filepath.Base(file))); err != nil {
return err
}
}
// initfs_functions
if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
return err
}
log.Println("- Writing and verifying initramfs archive")
if err := initfsArchive.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
return err
}
return nil
}
func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo) error {
initfsExtraArchive, err := archive.New()
if err != nil {
return err
}
if err := getInitfsExtraFiles(initfsExtraArchive.Files, devinfo); err != nil {
return err
}
log.Println("- Writing and verifying initramfs-extra archive")
if err := initfsExtraArchive.Write(filepath.Join(path, name), os.FileMode(0644)); err != nil {
return err
}
return nil
}
func stripExts(file string) string {
return strings.Split(file, ".")[0]
}
func getModulesInDir(files misc.StringSet, modPath string) error {
err := filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
// TODO: need to support more extensions?
if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
return nil
}
files[path] = false
return nil
})
if err != nil {
return err
}
return nil
}
// Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
// file and all of its dependencies.
// Note: it's not necessarily fatal if the module is not found, since it may
// have been built into the kernel
// TODO: look for it in modules.builtin, and make it fatal if it can't be found
// anywhere
func getModule(files misc.StringSet, modName string, modDir string) error {
modDep := filepath.Join(modDir, "modules.dep")
if !exists(modDep) {
log.Fatal("Kernel module.dep not found: ", modDir)
}
fd, err := os.Open(modDep)
if err != nil {
log.Print("Unable to open modules.dep: ", modDep)
return err
}
defer fd.Close()
deps, err := getModuleDeps(modName, fd)
if err != nil {
return err
}
for _, dep := range deps {
p := filepath.Join(modDir, dep)
if !exists(p) {
return fmt.Errorf("getModule: tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p)
}
files[p] = false
}
return nil
}
// Look up the module (treating '-' and '_' as interchangeable) in the given modules.dep io.Reader and return its path plus the paths of its dependencies
func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
var deps []string
// split the module name on - and/or _, build a regex for matching
splitRe := regexp.MustCompile("[-_]+")
modNameReStr := splitRe.ReplaceAllString(modName, "[-_]+")
re := regexp.MustCompile("^" + modNameReStr + "$")
s := bufio.NewScanner(modulesDep)
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) == 0 {
continue
}
fields[0] = strings.TrimSuffix(fields[0], ":")
found := re.FindAll([]byte(filepath.Base(stripExts(fields[0]))), -1)
if len(found) > 0 {
deps = append(deps, fields...)
break
}
}
if err := s.Err(); err != nil {
log.Print("Unable to get module + dependencies: ", modName)
return deps, err
}
return deps, nil
}
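
The anchored, separator-tolerant regex above is what keeps a short name like "msm" from matching sub-modules; a self-contained sketch of the same matching rule against hypothetical modules.dep entries:

package main

import (
    "fmt"
    "path/filepath"
    "regexp"
    "strings"
)

// matches reports whether a modules.dep entry refers to the requested module,
// treating '-' and '_' as interchangeable and anchoring the whole name.
func matches(modName, depEntry string) bool {
    splitRe := regexp.MustCompile("[-_]+")
    re := regexp.MustCompile("^" + splitRe.ReplaceAllString(modName, "[-_]+") + "$")
    base := strings.Split(filepath.Base(depEntry), ".")[0]
    return re.MatchString(base)
}

func main() {
    fmt.Println(matches("msm", "kernel/drivers/gpu/drm/msm/msm.ko"))                  // true
    fmt.Println(matches("msm", "kernel/sound/soc/codecs/snd-soc-msm8916-digital.ko")) // false
    fmt.Println(matches("dm_crypt", "kernel/drivers/md/dm-crypt.ko.xz"))              // true
}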

View File

@@ -1,7 +1,7 @@
// Copyright 2023 Clayton Craft <clayton@craftyguy.net>
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package modules
package main
import (
"strings"
@@ -27,6 +27,18 @@ func TestStripExts(t *testing.T) {
}
}
func stringSlicesEqual(a []string, b []string) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
var testModuleDep string = `
kernel/sound/soc/codecs/snd-soc-msm8916-digital.ko:
kernel/net/sched/act_ipt.ko.xz: kernel/net/netfilter/x_tables.ko.xz
@@ -68,15 +80,3 @@ func TestGetModuleDeps(t *testing.T) {
}
}
}
func stringSlicesEqual(a []string, b []string) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}

225
pkgs/archive/archive.go Normal file
View File

@@ -0,0 +1,225 @@
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package archive
import (
"bytes"
"compress/flate"
"github.com/cavaliercoder/go-cpio"
"github.com/klauspost/pgzip"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
"io"
"log"
"os"
"path/filepath"
"strings"
)
type Archive struct {
Dirs misc.StringSet
Files misc.StringSet
cpioWriter *cpio.Writer
buf *bytes.Buffer
}
func New() (*Archive, error) {
buf := new(bytes.Buffer)
archive := &Archive{
cpioWriter: cpio.NewWriter(buf),
Files: make(misc.StringSet),
Dirs: make(misc.StringSet),
buf: buf,
}
return archive, nil
}
func (archive *Archive) Write(path string, mode os.FileMode) error {
if err := archive.writeCpio(); err != nil {
return err
}
if err := archive.cpioWriter.Close(); err != nil {
return err
}
// Write archive to path
if err := archive.writeCompressed(path, mode); err != nil {
log.Print("Unable to write archive to location: ", path)
return err
}
if err := os.Chmod(path, mode); err != nil {
return err
}
return nil
}
func (archive *Archive) AddFile(file string, dest string) error {
if err := archive.addDir(filepath.Dir(dest)); err != nil {
return err
}
if archive.Files[file] {
// Already written to cpio
return nil
}
fileStat, err := os.Lstat(file)
if err != nil {
log.Print("AddFile: failed to stat file: ", file)
return err
}
// Symlink: write symlink to archive then set 'file' to link target
if fileStat.Mode()&os.ModeSymlink != 0 {
// log.Printf("File %q is a symlink", file)
target, err := os.Readlink(file)
if err != nil {
log.Print("AddFile: failed to get symlink target: ", file)
return err
}
destFilename := strings.TrimPrefix(dest, "/")
hdr := &cpio.Header{
Name: destFilename,
Linkname: target,
Mode: 0644 | cpio.ModeSymlink,
Size: int64(len(target)),
// Checksum: 1,
}
if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
return err
}
if _, err = archive.cpioWriter.Write([]byte(target)); err != nil {
return err
}
archive.Files[file] = true
if filepath.Dir(target) == "." {
target = filepath.Join(filepath.Dir(file), target)
}
// make sure target is an absolute path
if !filepath.IsAbs(target) {
target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
if err != nil {
return err
}
}
// TODO: add verbose mode, print stuff like this:
// log.Printf("symlink: %q, target: %q", file, target)
// write symlink target
err = archive.AddFile(target, target)
return err
}
// log.Printf("writing file: %q", file)
fd, err := os.Open(file)
if err != nil {
return err
}
defer fd.Close()
destFilename := strings.TrimPrefix(dest, "/")
hdr := &cpio.Header{
Name: destFilename,
Mode: cpio.FileMode(fileStat.Mode().Perm()),
Size: fileStat.Size(),
// Checksum: 1,
}
if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
return err
}
if _, err = io.Copy(archive.cpioWriter, fd); err != nil {
return err
}
archive.Files[file] = true
return nil
}
func (archive *Archive) writeCompressed(path string, mode os.FileMode) error {
// TODO: support other compression formats, based on deviceinfo
fd, err := os.Create(path)
if err != nil {
return err
}
gz, err := pgzip.NewWriterLevel(fd, flate.BestSpeed)
if err != nil {
return err
}
if _, err = io.Copy(gz, archive.buf); err != nil {
return err
}
if err := gz.Close(); err != nil {
return err
}
// call fsync just to be sure
if err := fd.Sync(); err != nil {
return err
}
if err := os.Chmod(path, mode); err != nil {
return err
}
return nil
}
func (archive *Archive) writeCpio() error {
// Write any dirs added explicitly
for dir := range archive.Dirs {
archive.addDir(dir)
}
// Write files and any missing parent dirs
for file, imported := range archive.Files {
if imported {
continue
}
if err := archive.AddFile(file, file); err != nil {
return err
}
}
return nil
}
func (archive *Archive) addDir(dir string) error {
if archive.Dirs[dir] {
// Already imported
return nil
}
if dir == "/" {
dir = "."
}
subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
for i, subdir := range subdirs {
path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
if archive.Dirs[path] {
// Subdir already imported
continue
}
err := archive.cpioWriter.WriteHeader(&cpio.Header{
Name: path,
Mode: cpio.ModeDir | 0755,
})
if err != nil {
return err
}
archive.Dirs[path] = true
// log.Print("wrote dir: ", path)
}
return nil
}
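
A short usage sketch of this archive API, assuming the import path used elsewhere in this tree and an illustrative output path:

package main

import (
    "log"
    "os"

    "gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/archive"
)

func main() {
    a, err := archive.New()
    if err != nil {
        log.Fatal(err)
    }
    // queue a file and a directory; missing parent directories are added
    // automatically when the cpio is written
    a.Files["/etc/deviceinfo"] = false
    a.Dirs["/sysroot"] = false
    // write a gzip-compressed cpio archive with the given permissions
    if err := a.Write("/tmp/initramfs-test", os.FileMode(0644)); err != nil {
        log.Fatal(err)
    }
}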

View File

@@ -11,43 +11,53 @@ import (
"os"
"reflect"
"strings"
"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
)
type DeviceInfo struct {
InitfsCompression string
InitfsExtraCompression string
MesaDriver string
ModulesInitfs string
UbootBoardname string
AppendDtb string
Arch string
BootimgAppendSEAndroidEnforce string
BootimgBlobpack string
BootimgDtbSecond string
BootimgMtkMkimage string
BootimgPxa string
BootimgQcdt string
Dtb string
FlashKernelOnUpdate string
FlashOffsetBase string
FlashOffsetKernel string
FlashOffsetRamdisk string
FlashOffsetSecond string
FlashOffsetTags string
FlashPagesize string
GenerateBootimg string
GenerateLegacyUbootInitfs string
InitfsCompression string
KernelCmdline string
LegacyUbootLoadAddress string
MesaDriver string
MkinitfsPostprocess string
ModulesInitfs string
}
// Reads the relevant entries from "file" into DeviceInfo struct
// Any already-set entries will be overwritten if they are present
// in "file"
func (d *DeviceInfo) ReadDeviceinfo(file string) error {
if exists, err := misc.Exists(file); !exists {
return fmt.Errorf("%q not found, required by mkinitfs", file)
} else if err != nil {
return fmt.Errorf("unexpected error getting status for %q: %s", file, err)
}
func ReadDeviceinfo(file string) (DeviceInfo, error) {
var deviceinfo DeviceInfo
fd, err := os.Open(file)
if err != nil {
return err
return deviceinfo, err
}
defer fd.Close()
if err := d.unmarshal(fd); err != nil {
return err
if err := unmarshal(fd, &deviceinfo); err != nil {
return deviceinfo, err
}
return nil
return deviceinfo, nil
}
// Unmarshals a deviceinfo into a DeviceInfo struct
func (d *DeviceInfo) unmarshal(r io.Reader) error {
func unmarshal(r io.Reader, devinfo *DeviceInfo) error {
s := bufio.NewScanner(r)
for s.Scan() {
line := s.Text()
@@ -83,7 +93,7 @@ func (d *DeviceInfo) unmarshal(r io.Reader) error {
return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
}
field := reflect.ValueOf(d).Elem().FieldByName(fieldName)
field := reflect.ValueOf(devinfo).Elem().FieldByName(fieldName)
if !field.IsValid() {
// an option that meets the deviceinfo "specification", but isn't
// one we care about in this module
@@ -109,10 +119,7 @@ func nameToField(name string) string {
if p == "deviceinfo" {
continue
}
if len(p) < 1 {
continue
}
field = field + strings.ToUpper(p[:1]) + p[1:]
field = field + strings.Title(p)
}
return field

View File

@@ -10,32 +10,6 @@ import (
"testing"
)
// Test ReadDeviceinfo and the logic of reading from multiple files
func TestReadDeviceinfo(t *testing.T) {
modules_expected := "panfrost foo bar bazz"
mesa_expected := "msm"
var devinfo DeviceInfo
err := devinfo.ReadDeviceinfo("./test_resources/deviceinfo-missing")
if !strings.Contains(err.Error(), "required by mkinitfs") {
t.Errorf("received an unexpected err: %s", err)
}
err = devinfo.ReadDeviceinfo("./test_resources/deviceinfo-first")
if err != nil {
t.Errorf("received an unexpected err: %s", err)
}
err = devinfo.ReadDeviceinfo("./test_resources/deviceinfo-msm")
if err != nil {
t.Errorf("received an unexpected err: %s", err)
}
if devinfo.ModulesInitfs != modules_expected {
t.Errorf("expected %q, got: %q", modules_expected, devinfo.ModulesInitfs)
}
if devinfo.MesaDriver != mesa_expected {
t.Errorf("expected %q, got: %q", mesa_expected, devinfo.MesaDriver)
}
}
// Test conversion of name to DeviceInfo struct field format
func TestNameToField(t *testing.T) {
tables := []struct {
@@ -47,7 +21,6 @@ func TestNameToField(t *testing.T) {
{"deviceinfo_modules_initfs", "ModulesInitfs"},
{"modules_initfs", "ModulesInitfs"},
{"deviceinfo_modules_initfs___", "ModulesInitfs"},
{"deviceinfo_initfs_extra_compression", "InitfsExtraCompression"},
}
for _, table := range tables {
@@ -70,9 +43,15 @@ func TestUnmarshal(t *testing.T) {
{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"\n", "panfrost foo bar bazz"},
{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"", "panfrost foo bar bazz"},
// line with multiple '='
{"InitfsCompression", "deviceinfo_initfs_compression=zstd:--foo=1 -T0 --bar=bazz", "zstd:--foo=1 -T0 --bar=bazz"},
{"KernelCmdline",
"deviceinfo_kernel_cmdline=\"PMOS_NO_OUTPUT_REDIRECT fw_devlink=off nvme_core.default_ps_max_latency_us=5500 pcie_aspm.policy=performance\"\n",
"PMOS_NO_OUTPUT_REDIRECT fw_devlink=off nvme_core.default_ps_max_latency_us=5500 pcie_aspm.policy=performance"},
// empty option
{"ModulesInitfs", "deviceinfo_modules_initfs=\"\"\n", ""},
{"Dtb", "deviceinfo_dtb=\"freescale/imx8mq-librem5-r2 freescale/imx8mq-librem5-r3 freescale/imx8mq-librem5-r4\"\n",
"freescale/imx8mq-librem5-r2 freescale/imx8mq-librem5-r3 freescale/imx8mq-librem5-r4"},
// valid deviceinfo line, just not used in this module
{"", "deviceinfo_codename=\"pine64-pinebookpro\"", ""},
// line with comment at the end
{"MesaDriver", "deviceinfo_mesa_driver=\"panfrost\" # this is a nice driver", "panfrost"},
{"", "# this is a comment!\n", ""},
@@ -84,7 +63,7 @@ func TestUnmarshal(t *testing.T) {
var d DeviceInfo
for _, table := range tables {
testName := fmt.Sprintf("unmarshal::'%s':", strings.ReplaceAll(table.in, "\n", "\\n"))
if err := d.unmarshal(strings.NewReader(table.in)); err != nil {
if err := unmarshal(strings.NewReader(table.in), &d); err != nil {
t.Errorf("%s received an unexpected err: ", err)
}

View File

@@ -1,2 +0,0 @@
deviceinfo_modules_initfs="panfrost foo bar bazz"
deviceinfo_mesa_driver="panfrost"

View File

@@ -1 +0,0 @@
deviceinfo_mesa_driver="msm"

View File

@@ -1,15 +1,17 @@
package osutil
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later
package misc
import (
"fmt"
"golang.org/x/sys/unix"
"log"
"os"
"path/filepath"
"strings"
"golang.org/x/sys/unix"
)
type StringSet map[string]bool
// Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
// relative to the given directory, to an absolute path
func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
@@ -46,29 +48,3 @@ func FreeSpace(path string) (uint64, error) {
size := stat.Bavail * uint64(stat.Bsize)
return size, nil
}
func getKernelReleaseFile() (string, error) {
files, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
// only one kernel flavor supported
if len(files) != 1 {
return "", fmt.Errorf("only one kernel release/flavor is supported, found: %q", files)
}
return files[0], nil
}
func GetKernelVersion() (string, error) {
var version string
releaseFile, err := getKernelReleaseFile()
if err != nil {
return version, err
}
contents, err := os.ReadFile(releaseFile)
if err != nil {
return version, err
}
return strings.TrimSpace(string(contents)), nil
}