Compare commits

1 commit

Author | SHA1 | Date
---|---|---
| 8f53926fb5 |
.gitlab-ci.yml
@@ -1,10 +1,7 @@
 ---
 
 # global settings
-image: alpine:edge
+image: alpine:latest
 
-variables:
-  GOFLAGS: "-buildvcs=false"
-
 stages:
   - lint
@@ -24,13 +21,23 @@ stages:
     - merge_requests
     - tags
 
+# device documentation
+gofmt linting:
+  stage: lint
+  allow_failure: true
+  <<: *only-default
+  before_script:
+    - apk -q add go
+  script:
+    - .gitlab-ci/check_gofmt.sh
+
 build:
   stage: build
   <<: *only-default
   before_script:
-    - apk -q add go staticcheck make
+    - apk -q add go
   script:
-    - make test
-    - make
+    - go build -v
+    - go test ./...
   artifacts:
     expire_in: 1 week
.gitlab-ci/check_gofmt.sh (new executable file, 11 lines)
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+files="$(gofmt -l .)"
+
+[ -z "$files" ] && exit 0
+
+# run gofmt to print out the diff of what needs to be changed
+
+gofmt -d -e .
+
+exit 1
Makefile (deleted, 54 lines)
@@ -1,54 +0,0 @@
-.POSIX:
-.SUFFIXES:
-
-PREFIX?=/usr/local
-BINDIR?=$(PREFIX)/sbin
-SHAREDIR?=$(PREFIX)/share
-GO?=go
-GOFLAGS?=
-LDFLAGS+=-s -w
-RM?=rm -f
-GOTEST=go test -count=1 -race
-
-GOSRC!=find * -name '*.go'
-GOSRC+=go.mod go.sum
-
-all: postmarketos-mkinitfs
-
-postmarketos-mkinitfs: $(GOSRC)
-	$(GO) build $(GOFLAGS) -ldflags "$(LDFLAGS)" -o postmarketos-mkinitfs
-
-.PHONY: fmt
-fmt:
-	gofmt -w .
-
-test:
-	@if [ `gofmt -l . | wc -l` -ne 0 ]; then \
-		gofmt -d .; \
-		echo "ERROR: source files need reformatting with gofmt"; \
-		exit 1; \
-	fi
-	@staticcheck ./...
-
-	@$(GOTEST) ./...
-
-clean:
-	$(RM) postmarketos-mkinitfs
-
-install: $(DOCS) postmarketos-mkinitfs
-	install -Dm755 postmarketos-mkinitfs -t $(DESTDIR)$(BINDIR)/
-	ln -sf postmarketos-mkinitfs $(DESTDIR)$(BINDIR)/mkinitfs
-
-.PHONY: checkinstall
-checkinstall:
-	test -e $(DESTDIR)$(BINDIR)/postmarketos-mkinitfs
-	test -L $(DESTDIR)$(BINDIR)/mkinitfs
-
-RMDIR_IF_EMPTY:=sh -c '! [ -d $$0 ] || ls -1qA $$0 | grep -q . || rmdir $$0'
-
-uninstall:
-	$(RM) $(DESTDIR)$(BINDIR)/postmarketos-mkinitfs
-	$(RM) $(DESTDIR)$(BINDIR)/mkinitfs
-	${RMDIR_IF_EMPTY} $(DESTDIR)$(BINDIR)
-
-.PHONY: all clean install uninstall test
README.md (deleted, 45 lines)
@@ -1,45 +0,0 @@
-`postmarketos-mkinitfs` is a tool for generating an initramfs (and installing
-it) on postmarketOS.
-
-## Building
-
-Building this project requires a Go compiler/toolchain and `make`:
-
-```
-$ make
-```
-
-To install locally:
-
-```
-$ make install
-```
-
-Installation prefix can be set in the generally accepted way with setting
-`PREFIX`:
-
-```
-$ make PREFIX=/some/location
-# make PREFIX=/some/location install
-```
-
-Other paths can be modified from the command line as well, see the top section of
-the `Makefile` for more information.
-
-Tests (functional and linting) can be executed by using the `test` make target:
-
-```
-$ make test
-```
-
-## Usage
-
-The application uses configuration from `/etc/deviceinfo`, and does not support
-any other options at runtime. It can be run simply by executing:
-
-```
-$ postmarketos-mkinitfs
-```
-
-For historical reasons, a symlink from `mkinitfs` to `postmarketos-mkinitfs` is
-also installed by the makefile's `install` target.
go.mod (2 changed lines)
@@ -3,6 +3,8 @@ module gitlab.com/postmarketOS/postmarketos-mkinitfs
 go 1.16
 
 require (
+	git.sr.ht/~sircmpwn/getopt v0.0.0-20201218204720-9961a9c6298f
+	github.com/BurntSushi/toml v0.4.0
 	github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e
 	github.com/klauspost/compress v1.13.3 // indirect
 	github.com/klauspost/pgzip v1.2.5
go.sum (21 changed lines)
@@ -1,9 +1,30 @@
+git.sr.ht/~sircmpwn/getopt v0.0.0-20201218204720-9961a9c6298f h1:f5axCdaRzGDCihN3o1Lq0ydn0VlkhY+11G0JOyY5qss=
+git.sr.ht/~sircmpwn/getopt v0.0.0-20201218204720-9961a9c6298f/go.mod h1:wMEGFFFNuPos7vHmWXfszqImLppbc0wEhh6JBfJIUgw=
+github.com/BurntSushi/toml v0.3.2-0.20210614224209-34d990aa228d/go.mod h1:2QZjSXA5e+XyFeCAxxtL8Z4StYUsTquL8ODGPR3C3MA=
+github.com/BurntSushi/toml v0.3.2-0.20210621044154-20a94d639b8e/go.mod h1:t4zg8TkHfP16Vb3x4WKIw7zVYMit5QFtPEO8lOWxzTg=
+github.com/BurntSushi/toml v0.3.2-0.20210624061728-01bfc69d1057/go.mod h1:NMj2lD5LfMqcE0w8tnqOsH6944oaqpI1974lrIwerfE=
+github.com/BurntSushi/toml v0.3.2-0.20210704081116-ccff24ee4463/go.mod h1:EkRrMiQQmfxK6kIldz3QbPlhmVkrjW1RDJUnbDqGYvc=
+github.com/BurntSushi/toml v0.4.0 h1:qD/r9AL67srjW6O3fcSKZDsXqzBNX6ieSRywr2hRrdE=
+github.com/BurntSushi/toml v0.4.0/go.mod h1:wtejDu7Q0FhCWAo2aXkywSJyYFg01EDTKozLNCz2JBA=
+github.com/BurntSushi/toml-test v0.1.1-0.20210620192437-de01089bbf76/go.mod h1:P/PrhmZ37t5llHfDuiouWXtFgqOoQ12SAh9j6EjrBR4=
+github.com/BurntSushi/toml-test v0.1.1-0.20210624055653-1f6389604dc6/go.mod h1:UAIt+Eo8itMZAAgImXkPGDMYsT1SsJkVdB5TuONl86A=
+github.com/BurntSushi/toml-test v0.1.1-0.20210704062846-269931e74e3f/go.mod h1:fnFWrIwqgHsEjVsW3RYCJmDo86oq9eiJ9u6bnqhtm2g=
+github.com/BurntSushi/toml-test v0.1.1-0.20210723065233-facb9eccd4da h1:2QGUaQtV2u8V1USTI883wo+uxtZFAiZ4TCNupHJ98IU=
+github.com/BurntSushi/toml-test v0.1.1-0.20210723065233-facb9eccd4da/go.mod h1:ve9Q/RRu2vHi42LocPLNvagxuUJh993/95b18bw/Nws=
 github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=
 github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/klauspost/compress v1.13.3 h1:BtAvtV1+h0YwSVwWoYXMREPpYu9VzTJ9QDI1TEg/iQQ=
 github.com/klauspost/compress v1.13.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+zgo.at/zli v0.0.0-20210619044753-e7020a328e59/go.mod h1:HLAc12TjNGT+VRXr76JnsNE3pbooQtwKWhX+RlDjQ2Y=
main.go (621 changed lines)
@@ -1,23 +1,22 @@
-// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
+// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 package main
 
 import (
 	"bufio"
 	"debug/elf"
 	"errors"
-	"flag"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"log"
 	"os"
 	"os/exec"
 	"path/filepath"
-	"regexp"
 	"strings"
 	"time"
 
+	"git.sr.ht/~sircmpwn/getopt"
 	"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/archive"
 	"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/deviceinfo"
 	"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
@@ -29,22 +28,21 @@ func timeFunc(start time.Time, name string) {
 }
 
 func main() {
-	deviceinfoFile := "/etc/deviceinfo"
-	if !exists(deviceinfoFile) {
+	devinfo, err := deviceinfo.ReadDeviceinfo()
+	if err != nil {
 		log.Print("NOTE: deviceinfo (from device package) not installed yet, " +
 			"not building the initramfs now (it should get built later " +
 			"automatically.)")
-		return
+		os.Exit(0)
 	}
 
-	devinfo, err := deviceinfo.ReadDeviceinfo(deviceinfoFile)
-	if err != nil {
+	var outDir string
+	getopt.StringVar(&outDir, "d", "/boot", "Directory to output initfs(-extra) and other boot files, default: /boot")
+
+	if err := getopt.Parse(); err != nil {
 		log.Fatal(err)
 	}
 
-	outDir := flag.String("d", "/boot", "Directory to output initfs(-extra) and other boot files")
-	flag.Parse()
-
 	defer timeFunc(time.Now(), "mkinitfs")
 
 	kernVer, err := getKernelVersion()
@@ -52,35 +50,31 @@ func main() {
 		log.Fatal(err)
 	}
 
+	if err != nil {
+		log.Fatal(err)
+	}
+
 	// temporary working dir
-	workDir, err := os.MkdirTemp("", "mkinitfs")
+	workDir, err := ioutil.TempDir("", "mkinitfs")
 	if err != nil {
 		log.Fatal("Unable to create temporary work directory:", err)
 	}
 	defer os.RemoveAll(workDir)
 
 	log.Print("Generating for kernel version: ", kernVer)
-	log.Print("Output directory: ", *outDir)
+	log.Print("Output directory: ", outDir)
 
 	if err := generateInitfs("initramfs", workDir, kernVer, devinfo); err != nil {
-		log.Fatal("generateInitfs: ", err)
+		log.Fatal(err)
 	}
 
 	if err := generateInitfsExtra("initramfs-extra", workDir, devinfo); err != nil {
-		log.Fatal("generateInitfsExtra: ", err)
-	}
-
-	if err := copyUbootFiles(workDir, devinfo); errors.Is(err, os.ErrNotExist) {
-		log.Println("u-boot files copying skipped: ", err)
-	} else {
-		if err != nil {
-			log.Fatal("copyUbootFiles: ", err)
-		}
+		log.Fatal(err)
 	}
 
 	// Final processing of initramfs / kernel is done by boot-deploy
-	if err := bootDeploy(workDir, *outDir); err != nil {
-		log.Fatal("bootDeploy: ", err)
+	if err := bootDeploy(workDir, outDir); err != nil {
+		log.Fatal(err)
 	}
 
 }
@@ -93,29 +87,18 @@ func bootDeploy(workDir string, outDir string) error {
 	if len(kernels) == 0 {
 		return errors.New("Unable to find any kernels at " + filepath.Join(outDir, "vmlinuz*"))
 	}
-
-	// Pick a kernel that does not have suffixes added by boot-deploy
-	var kernFile string
-	for _, f := range kernels {
-		if strings.HasSuffix(f, "-dtb") || strings.HasSuffix(f, "-mtk") {
-			continue
-		}
-		kernFile = f
-		break
-	}
-
-	kernFd, err := os.Open(kernFile)
+	kernFile, err := os.Open(kernels[0])
 	if err != nil {
 		return err
 	}
-	defer kernFd.Close()
+	defer kernFile.Close()
 
 	kernFileCopy, err := os.Create(filepath.Join(workDir, "vmlinuz"))
 	if err != nil {
 		return err
 	}
 
-	if _, err = io.Copy(kernFileCopy, kernFd); err != nil {
+	if _, err = io.Copy(kernFileCopy, kernFile); err != nil {
 		return err
 	}
 	kernFileCopy.Close()
@@ -128,19 +111,33 @@ func bootDeploy(workDir string, outDir string) error {
 		"-o", outDir,
 		"initramfs-extra")
 	if !exists(cmd.Path) {
-		return errors.New("boot-deploy command not found")
+		return errors.New("boot-deploy command not found.")
 	}
 
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
+	// err is ignored, since shellcheck will return != 0 if there are issues
 	if err := cmd.Run(); err != nil {
-		log.Print("'boot-deploy' command failed")
+		log.Print("'boot-deploy' command failed: ")
 		return err
 	}
 
 	return nil
 }
 
+func createInitfsRootDirs(initfsRoot string) {
+	dirs := []string{
+		"/bin", "/sbin", "/usr/bin", "/usr/lib", "/usr/sbin", "/proc", "/sys",
+		"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
+	}
+
+	for _, dir := range dirs {
+		if err := os.MkdirAll(filepath.Join(initfsRoot, dir), os.FileMode(0775)); err != nil {
+			log.Fatal(err)
+		}
+	}
+}
+
 func exists(file string) bool {
 	if _, err := os.Stat(file); err == nil {
 		return true
@@ -148,72 +145,72 @@ func exists(file string) bool {
 	return false
 }
 
-func getHookFiles(filesdir string) (files []string, err error) {
-	fileInfo, err := os.ReadDir(filesdir)
+func getHookFiles(filesdir string) misc.StringSet {
+	fileInfo, err := ioutil.ReadDir(filesdir)
 	if err != nil {
-		return nil, err
+		log.Fatal(err)
 	}
+	files := make(misc.StringSet)
 	for _, file := range fileInfo {
 		path := filepath.Join(filesdir, file.Name())
 		f, err := os.Open(path)
 		if err != nil {
-			return nil, err
+			log.Fatal(err)
 		}
 		defer f.Close()
 		s := bufio.NewScanner(f)
 		for s.Scan() {
-			if filelist, err := getFiles([]string{s.Text()}, true); err != nil {
-				return nil, fmt.Errorf("unable to find file %q required by %q", s.Text(), path)
-			} else {
-				files = append(files, filelist...)
+			if !exists(s.Text()) {
+				log.Fatalf("Unable to find file %q required by %q", s.Text(), path)
 			}
+			files[s.Text()] = false
 		}
 		if err := s.Err(); err != nil {
-			return nil, err
+			log.Fatal(err)
 		}
 	}
-	return files, nil
+	return files
 }
 
 // Recursively list all dependencies for a given ELF binary
-func getBinaryDeps(file string) (files []string, err error) {
+func getBinaryDeps(files misc.StringSet, file string) error {
 	// if file is a symlink, resolve dependencies for target
 	fileStat, err := os.Lstat(file)
 	if err != nil {
-		return nil, fmt.Errorf("getBinaryDeps: failed to stat file %q: %w", file, err)
+		log.Print("getBinaryDeps: failed to stat file")
+		return err
 	}
 
 	// Symlink: write symlink to archive then set 'file' to link target
 	if fileStat.Mode()&os.ModeSymlink != 0 {
 		target, err := os.Readlink(file)
 		if err != nil {
-			return nil, fmt.Errorf("getBinaryDeps: unable to read symlink %q: %w", file, err)
+			log.Print("getBinaryDeps: unable to read symlink: ", file)
+			return err
 		}
 		if !filepath.IsAbs(target) {
 			target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
 			if err != nil {
-				return files, err
+				return err
 			}
 		}
-		binaryDepFiles, err := getBinaryDeps(target)
-		if err != nil {
-			return files, err
+		if err := getBinaryDeps(files, target); err != nil {
+			return err
 		}
-		files = append(files, binaryDepFiles...)
-		return files, err
+		return err
 	}
 
 	// get dependencies for binaries
 	fd, err := elf.Open(file)
 	if err != nil {
-		return nil, fmt.Errorf("getBinaryDeps: unable to open elf binary %q: %w", file, err)
+		log.Fatal(err)
 	}
 	libs, _ := fd.ImportedLibraries()
 	fd.Close()
-	files = append(files, file)
+	files[file] = false
 
 	if len(libs) == 0 {
-		return files, err
+		return err
 	}
 
 	libdirs := []string{"/usr/lib", "/lib"}
@@ -222,96 +219,55 @@ func getBinaryDeps(file string) (files []string, err error) {
 		for _, libdir := range libdirs {
 			path := filepath.Join(libdir, lib)
 			if _, err := os.Stat(path); err == nil {
-				binaryDepFiles, err := getBinaryDeps(path)
+				err := getBinaryDeps(files, path)
 				if err != nil {
-					return files, err
+					return err
 				}
-				files = append(files, binaryDepFiles...)
-				files = append(files, path)
+				files[path] = false
 				found = true
 				break
 			}
 		}
 		if !found {
-			return nil, fmt.Errorf("getBinaryDeps: unable to locate dependency for %q: %s", file, lib)
+			log.Fatalf("Unable to locate dependency for %q: %s", file, lib)
 		}
 	}
 
-	return
+	return nil
 }
 
-func getFiles(list []string, required bool) (files []string, err error) {
-	for _, file := range list {
-		filelist, err := getFile(file, required)
+func getFiles(files misc.StringSet, newFiles misc.StringSet, required bool) error {
+	for file := range newFiles {
+		err := getFile(files, file, required)
 		if err != nil {
-			return nil, err
+			return err
 		}
-		files = append(files, filelist...)
 	}
-
-	files = misc.RemoveDuplicates(files)
-	return
+	return nil
 }
 
-func getFile(file string, required bool) (files []string, err error) {
-	// Expand glob expression
-	expanded, err := filepath.Glob(file)
-	if err != nil {
-		return
-	}
-	if len(expanded) > 0 && expanded[0] != file {
-		for _, path := range expanded {
-			if globFiles, err := getFile(path, required); err != nil {
-				return files, err
-			} else {
-				files = append(files, globFiles...)
-			}
-		}
-		return misc.RemoveDuplicates(files), nil
-	}
-
-	fileInfo, err := os.Stat(file)
-	if err != nil {
+func getFile(files misc.StringSet, file string, required bool) error {
+	if !exists(file) {
 		if required {
-			return files, errors.New("getFile: File does not exist :" + file)
+			return errors.New("getFile: File does not exist :" + file)
 		}
-		return files, nil
+		return nil
 	}
 
-	if fileInfo.IsDir() {
-		// Recurse over directory contents
-		err := filepath.Walk(file, func(path string, f os.FileInfo, err error) error {
-			if err != nil {
-				return err
-			}
-			if f.IsDir() {
-				return nil
-			}
-			newFiles, err := getFile(path, required)
-			if err != nil {
-				return err
-			}
-			files = append(files, newFiles...)
-			return nil
-		})
-		if err != nil {
-			return files, err
-		}
-	} else {
-		files = append(files, file)
+	files[file] = false
 
-		// get dependencies for binaries
-		if _, err := elf.Open(file); err == nil {
-			if binaryDepFiles, err := getBinaryDeps(file); err != nil {
-				return files, err
-			} else {
-				files = append(files, binaryDepFiles...)
-			}
-		}
+	// get dependencies for binaries
+	if _, err := elf.Open(file); err != nil {
+		// file is not an elf, so don't resolve lib dependencies
+		return nil
 	}
 
-	files = misc.RemoveDuplicates(files)
-	return
+	err := getBinaryDeps(files, file)
+	if err != nil {
+		return err
+	}
+
+	return nil
 }
 
 func getOskConfFontPath(oskConfPath string) (string, error) {
@@ -338,199 +294,166 @@ func getOskConfFontPath(oskConfPath string) (string, error) {
 
 // Get a list of files and their dependencies related to supporting rootfs full
 // disk (d)encryption
-func getFdeFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
-	confFiles := []string{
-		"/etc/osk.conf",
-		"/etc/ts.conf",
-		"/etc/pointercal",
-		"/etc/fb.modes",
-		"/etc/directfbrc",
+func getFdeFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
+	confFiles := misc.StringSet{
+		"/etc/osk.conf": false,
+		"/etc/ts.conf": false,
+		"/etc/pointercal": false,
+		"/etc/fb.modes": false,
+		"/etc/directfbrc": false,
 	}
 	// TODO: this shouldn't be false? though some files (pointercal) don't always exist...
-	if files, err = getFiles(confFiles, false); err != nil {
-		return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
+	if err := getFiles(files, confFiles, false); err != nil {
+		return err
 	}
 
 	// osk-sdl
-	oskFiles := []string{
-		"/usr/bin/osk-sdl",
-		"/sbin/cryptsetup",
-		"/usr/lib/libGL.so.1",
-	}
-	if filelist, err := getFiles(oskFiles, true); err != nil {
-		return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
-	} else {
-		files = append(files, filelist...)
+	oskFiles := misc.StringSet{
+		"/usr/bin/osk-sdl": false,
+		"/sbin/cryptsetup": false,
+		"/usr/lib/libGL.so.1": false}
+	if err := getFiles(files, oskFiles, true); err != nil {
+		return err
 	}
 
 	fontFile, err := getOskConfFontPath("/etc/osk.conf")
 	if err != nil {
-		return nil, fmt.Errorf("getFdeFiles: failed to add file %q: %w", fontFile, err)
+		return err
 	}
-	files = append(files, fontFile)
+	files[fontFile] = false
 
 	// Directfb
-	dfbFiles := []string{}
+	dfbFiles := make(misc.StringSet)
 	err = filepath.Walk("/usr/lib/directfb-1.7-7", func(path string, f os.FileInfo, err error) error {
 		if filepath.Ext(path) == ".so" {
-			dfbFiles = append(dfbFiles, path)
+			dfbFiles[path] = false
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, fmt.Errorf("getFdeFiles: failed to add file %w", err)
+		log.Print("getBinaryDeps: failed to stat file")
+		return err
 	}
-	if filelist, err := getFiles(dfbFiles, true); err != nil {
-		return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
-	} else {
-		files = append(files, filelist...)
+	if err := getFiles(files, dfbFiles, true); err != nil {
+		return err
 	}
 
 	// tslib
-	tslibFiles := []string{}
+	tslibFiles := make(misc.StringSet)
 	err = filepath.Walk("/usr/lib/ts", func(path string, f os.FileInfo, err error) error {
 		if filepath.Ext(path) == ".so" {
-			tslibFiles = append(tslibFiles, path)
+			tslibFiles[path] = false
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, fmt.Errorf("getFdeFiles: failed to add file: %w", err)
+		log.Print("getBinaryDeps: failed to stat file")
+		return err
 	}
 	libts, _ := filepath.Glob("/usr/lib/libts*")
-	tslibFiles = append(tslibFiles, libts...)
-	if filelist, err := getFiles(tslibFiles, true); err != nil {
-		return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
-	} else {
-		files = append(files, filelist...)
+	for _, file := range libts {
+		tslibFiles[file] = false
+	}
+	if err = getFiles(files, tslibFiles, true); err != nil {
+		return err
 	}
 
 	// mesa hw accel
-	if devinfo.MesaDriver != "" {
-		mesaFiles := []string{
-			"/usr/lib/libEGL.so.1",
-			"/usr/lib/libGLESv2.so.2",
-			"/usr/lib/libgbm.so.1",
-			"/usr/lib/libudev.so.1",
-			"/usr/lib/xorg/modules/dri/" + devinfo.MesaDriver + "_dri.so",
+	if devinfo.Deviceinfo_mesa_driver != "" {
+		mesaFiles := misc.StringSet{
+			"/usr/lib/libEGL.so.1": false,
+			"/usr/lib/libGLESv2.so.2": false,
+			"/usr/lib/libgbm.so.1": false,
+			"/usr/lib/libudev.so.1": false,
+			"/usr/lib/xorg/modules/dri/" + devinfo.Deviceinfo_mesa_driver + "_dri.so": false,
 		}
-		if filelist, err := getFiles(mesaFiles, true); err != nil {
-			return nil, fmt.Errorf("getFdeFiles: failed to add files: %w", err)
-		} else {
-			files = append(files, filelist...)
+		if err := getFiles(files, mesaFiles, true); err != nil {
+			return err
 		}
 	}
 
-	return
+	return nil
 }
 
-func getHookScripts() (files []string) {
+func getHookScripts(files misc.StringSet) {
 	scripts, _ := filepath.Glob("/etc/postmarketos-mkinitfs/hooks/*.sh")
-	files = append(files, scripts...)
-
-	return
+	for _, script := range scripts {
+		files[script] = false
+	}
 }
 
-func getInitfsExtraFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
+func getInitfsExtraFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
 	log.Println("== Generating initramfs extra ==")
-	binariesExtra := []string{
-		"/lib/libz.so.1",
-		"/sbin/btrfs",
-		"/sbin/dmsetup",
-		"/sbin/e2fsck",
-		"/usr/sbin/parted",
-		"/usr/sbin/resize2fs",
-		"/usr/sbin/resize.f2fs",
+	binariesExtra := misc.StringSet{
+		"/lib/libz.so.1": false,
+		"/sbin/dmsetup": false,
+		"/sbin/e2fsck": false,
+		"/usr/sbin/parted": false,
+		"/usr/sbin/resize2fs": false,
+		"/usr/sbin/resize.f2fs": false,
 	}
 	log.Println("- Including extra binaries")
-	if filelist, err := getFiles(binariesExtra, true); err != nil {
-		return nil, err
-	} else {
-		files = append(files, filelist...)
-	}
-
-	// Hook files & scripts
-	if exists("/etc/postmarketos-mkinitfs/files-extra") {
-		log.Println("- Including hook files")
-		var hookFiles []string
-		hookFiles, err := getHookFiles("/etc/postmarketos-mkinitfs/files-extra")
-		if err != nil {
-			return nil, err
-		}
-		if filelist, err := getFiles(hookFiles, true); err != nil {
-			return nil, err
-		} else {
-			files = append(files, filelist...)
-		}
+	if err := getFiles(files, binariesExtra, true); err != nil {
+		return err
 	}
 
 	if exists("/usr/bin/osk-sdl") {
 		log.Println("- Including FDE support")
-		if fdeFiles, err := getFdeFiles(devinfo); err != nil {
-			return nil, err
-		} else {
-			files = append(files, fdeFiles...)
+		if err := getFdeFiles(files, devinfo); err != nil {
+			return err
 		}
 	} else {
 		log.Println("- *NOT* including FDE support")
 	}
 
-	return
+	return nil
 }
 
-func getInitfsFiles(devinfo deviceinfo.DeviceInfo) (files []string, err error) {
+func getInitfsFiles(files misc.StringSet, devinfo deviceinfo.DeviceInfo) error {
 	log.Println("== Generating initramfs ==")
-	requiredFiles := []string{
-		"/bin/busybox",
-		"/bin/sh",
-		"/bin/busybox-extras",
-		"/usr/sbin/telnetd",
-		"/sbin/kpartx",
-		"/etc/deviceinfo",
-		"/usr/bin/unudhcpd",
+	requiredFiles := misc.StringSet{
+		"/bin/busybox": false,
+		"/bin/sh": false,
+		"/bin/busybox-extras": false,
+		"/usr/sbin/telnetd": false,
+		"/sbin/kpartx": false,
+		"/etc/deviceinfo": false,
 	}
 
 	// Hook files & scripts
 	if exists("/etc/postmarketos-mkinitfs/files") {
 		log.Println("- Including hook files")
-		if hookFiles, err := getHookFiles("/etc/postmarketos-mkinitfs/files"); err != nil {
-			return nil, err
-		} else {
-			if filelist, err := getFiles(hookFiles, true); err != nil {
-				return nil, err
-			} else {
-				files = append(files, filelist...)
-			}
+		hookFiles := getHookFiles("/etc/postmarketos-mkinitfs/files")
+		if err := getFiles(files, hookFiles, true); err != nil {
+			return err
 		}
 	}
 
 	log.Println("- Including hook scripts")
-	hookScripts := getHookScripts()
-	files = append(files, hookScripts...)
+	getHookScripts(files)
 
 	log.Println("- Including required binaries")
-	if filelist, err := getFiles(requiredFiles, true); err != nil {
-		return nil, err
-	} else {
-		files = append(files, filelist...)
+	if err := getFiles(files, requiredFiles, true); err != nil {
+		return err
 	}
 
-	return
+	return nil
 }
 
-func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []string, err error) {
+func getInitfsModules(files misc.StringSet, devinfo deviceinfo.DeviceInfo, kernelVer string) error {
 	log.Println("- Including kernel modules")
 
 	modDir := filepath.Join("/lib/modules", kernelVer)
 	if !exists(modDir) {
 		// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
 		log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
-		return
+		return nil
 	}
 
 	// modules.* required by modprobe
 	modprobeFiles, _ := filepath.Glob(filepath.Join(modDir, "modules.*"))
-	files = append(files, modprobeFiles...)
+	for _, file := range modprobeFiles {
+		files[file] = false
+	}
 
 	// module name (without extension), or directory (trailing slash is important! globs OK)
 	requiredModules := []string{
@@ -548,30 +471,29 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []
 			dir = filepath.Join(modDir, dir)
 			dirs, _ := filepath.Glob(dir)
 			for _, d := range dirs {
-				if filelist, err := getModulesInDir(d); err != nil {
-					return nil, fmt.Errorf("getInitfsModules: unable to get modules dir %q: %w", d, err)
-				} else {
-					files = append(files, filelist...)
+				if err := getModulesInDir(files, d); err != nil {
+					log.Print("Unable to get modules in dir: ", d)
+					return err
 				}
 			}
+			continue
 		} else if dir == "" {
 			// item is a module name
-			if filelist, err := getModule(file, modDir); err != nil {
-				return nil, fmt.Errorf("getInitfsModules: unable to get module %q: %w", file, err)
-			} else {
-				files = append(files, filelist...)
+			if err := getModule(files, file, modDir); err != nil {
+				log.Print("Unable to get module: ", file)
+				return err
 			}
+			continue
 		} else {
 			log.Printf("Unknown module entry: %q", item)
 		}
 	}
 
 	// deviceinfo modules
-	for _, module := range strings.Fields(devinfo.ModulesInitfs) {
-		if filelist, err := getModule(module, modDir); err != nil {
-			return nil, fmt.Errorf("getInitfsModules: unable to get modules from deviceinfo: %w", err)
-		} else {
-			files = append(files, filelist...)
+	for _, module := range strings.Fields(devinfo.Deviceinfo_modules_initfs) {
+		if err := getModule(files, module, modDir); err != nil {
+			log.Print("Unable to get modules from deviceinfo")
+			return err
 		}
 	}
 
@@ -580,27 +502,27 @@ func getInitfsModules(devinfo deviceinfo.DeviceInfo, kernelVer string) (files []
 	for _, modFile := range initfsModFiles {
 		f, err := os.Open(modFile)
 		if err != nil {
-			return nil, fmt.Errorf("getInitfsModules: unable to open mkinitfs modules file %q: %w", modFile, err)
+			log.Print("getInitfsModules: unable to open mkinitfs modules file: ", modFile)
+			return err
 		}
 		defer f.Close()
 		s := bufio.NewScanner(f)
 		for s.Scan() {
-			if filelist, err := getModule(s.Text(), modDir); err != nil {
-				return nil, fmt.Errorf("getInitfsModules: unable to get module file %q: %w", s.Text(), err)
-			} else {
-				files = append(files, filelist...)
+			if err := getModule(files, s.Text(), modDir); err != nil {
+				log.Print("getInitfsModules: unable to get module file: ", s.Text())
+				return err
 			}
 		}
 	}
 
-	return
+	return nil
 }
 
 func getKernelReleaseFile() (string, error) {
 	files, _ := filepath.Glob("/usr/share/kernel/*/kernel.release")
 	// only one kernel flavor supported
 	if len(files) != 1 {
-		return "", fmt.Errorf("only one kernel release/flavor is supported, found: %q", files)
+		return "", errors.New(fmt.Sprintf("Only one kernel release/flavor is supported, found: %q", files))
 	}
 
 	return files[0], nil
@@ -622,50 +544,6 @@ func getKernelVersion() (string, error) {
 	return strings.TrimSpace(string(contents)), nil
 }
 
-func Copy(srcFile, dstFile string) error {
-	out, err := os.Create(dstFile)
-	if err != nil {
-		return err
-	}
-
-	defer out.Close()
-
-	in, err := os.Open(srcFile)
-	if err != nil {
-		return err
-	}
-	defer in.Close()
-
-	_, err = io.Copy(out, in)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func copyUbootFiles(path string, devinfo deviceinfo.DeviceInfo) error {
-	if devinfo.UbootBoardname == "" {
-		return nil
-	}
-
-	srcDir := filepath.Join("/usr/share/u-boot", devinfo.UbootBoardname)
-	entries, err := os.ReadDir(srcDir)
-	if err != nil {
-		return err
-	}
-	for _, entry := range entries {
-		sourcePath := filepath.Join(srcDir, entry.Name())
-		destPath := filepath.Join(path, entry.Name())
-
-		if err := Copy(sourcePath, destPath); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
 func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo.DeviceInfo) error {
 	initfsArchive, err := archive.New()
 	if err != nil {
@@ -677,40 +555,18 @@ func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo
 		"/dev", "/tmp", "/lib", "/boot", "/sysroot", "/etc",
 	}
 	for _, dir := range requiredDirs {
-		if err := initfsArchive.AddItem(dir, dir); err != nil {
-			return err
-		}
+		initfsArchive.Dirs[dir] = false
 	}
 
-	if files, err := getInitfsFiles(devinfo); err != nil {
+	if err := getInitfsFiles(initfsArchive.Files, devinfo); err != nil {
 		return err
-	} else {
-		items := make(map[string]string)
-		// copy files into a map, where the source(key) and dest(value) are the
-		// same
-		for _, f := range files {
-			items[f] = f
-		}
-		if err := initfsArchive.AddItems(items); err != nil {
-			return err
-		}
 	}
 
-	if files, err := getInitfsModules(devinfo, kernVer); err != nil {
+	if err := getInitfsModules(initfsArchive.Files, devinfo, kernVer); err != nil {
 		return err
-	} else {
-		items := make(map[string]string)
-		// copy files into a map, where the source(key) and dest(value) are the
-		// same
-		for _, f := range files {
-			items[f] = f
-		}
-		if err := initfsArchive.AddItems(items); err != nil {
-			return err
-		}
 	}
 
-	if err := initfsArchive.AddItem("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
+	if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init.sh", "/init"); err != nil {
 		return err
 	}
 
@@ -719,13 +575,13 @@ func generateInitfs(name string, path string, kernVer string, devinfo deviceinfo
 	splashFiles, _ := filepath.Glob("/usr/share/postmarketos-splashes/*.ppm.gz")
 	for _, file := range splashFiles {
 		// splash images are expected at /<file>
-		if err := initfsArchive.AddItem(file, filepath.Join("/", filepath.Base(file))); err != nil {
+		if err := initfsArchive.AddFile(file, filepath.Join("/", filepath.Base(file))); err != nil {
 			return err
 		}
 	}
 
 	// initfs_functions
-	if err := initfsArchive.AddItem("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
+	if err := initfsArchive.AddFile("/usr/share/postmarketos-mkinitfs/init_functions.sh", "/init_functions.sh"); err != nil {
 		return err
 	}
 
@@ -743,19 +599,8 @@ func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo
 		return err
 	}
 
-	if files, err := getInitfsExtraFiles(devinfo); err != nil {
+	if err := getInitfsExtraFiles(initfsExtraArchive.Files, devinfo); err != nil {
 		return err
-	} else {
-
-		items := make(map[string]string)
-		// copy files into a map, where the source(key) and dest(value) are the
-		// same
-		for _, f := range files {
-			items[f] = f
-		}
-		if err := initfsExtraArchive.AddItems(items); err != nil {
-			return err
-		}
 	}
 
 	log.Println("- Writing and verifying initramfs-extra archive")
@@ -767,23 +612,29 @@ func generateInitfsExtra(name string, path string, devinfo deviceinfo.DeviceInfo
 }
 
 func stripExts(file string) string {
-	return strings.Split(file, ".")[0]
+	for {
+		if filepath.Ext(file) == "" {
+			break
+		}
+		file = strings.TrimSuffix(file, filepath.Ext(file))
+	}
+	return file
 }
 
-func getModulesInDir(modPath string) (files []string, err error) {
-	err = filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
+func getModulesInDir(files misc.StringSet, modPath string) error {
+	err := filepath.Walk(modPath, func(path string, f os.FileInfo, err error) error {
 		// TODO: need to support more extensions?
 		if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
 			return nil
 		}
-		files = append(files, path)
+		files[path] = false
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	return
+	return nil
 }
 
 // Given a module name, e.g. 'dwc_wdt', resolve the full path to the module
@@ -792,56 +643,62 @@ func getModulesInDir(modPath string) (files []string, err error) {
 // have been built into the kernel
 // TODO: look for it in modules.builtin, and make it fatal if it can't be found
 // anywhere
-func getModule(modName string, modDir string) (files []string, err error) {
+func getModule(files misc.StringSet, modName string, modDir string) error {
 
-	modDep := filepath.Join(modDir, "modules.dep")
-	if !exists(modDep) {
-		return nil, fmt.Errorf("kernel module.dep not found: %s", modDir)
+	deps, err := getModuleDeps(modName, modDir)
+	if err != nil {
+		return err
 	}
 
-	fd, err := os.Open(modDep)
-	if err != nil {
-		return nil, fmt.Errorf("unable to open modules.dep: %w", err)
-	}
-	defer fd.Close()
-
-	deps, err := getModuleDeps(modName, fd)
-	if err != nil {
-		return nil, err
+	if len(deps) == 0 {
+		// retry and swap - and _ in module name
+		if strings.Contains(modName, "-") {
+			modName = strings.ReplaceAll(modName, "-", "_")
+		} else {
+			modName = strings.ReplaceAll(modName, "_", "-")
+		}
+		deps, err = getModuleDeps(modName, modDir)
+		if err != nil {
+			return err
+		}
 	}
 
 	for _, dep := range deps {
 		p := filepath.Join(modDir, dep)
 		if !exists(p) {
-			return nil, fmt.Errorf("tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p)
+			log.Print(fmt.Sprintf("Tried to include a module that doesn't exist in the modules directory (%s): %s", modDir, p))
+			return err
 		}
-		files = append(files, p)
+		files[p] = false
 	}
 
-	return
+	return err
 }
 
-// Get the canonicalized name for the module as represented in the given modules.dep io.reader
-func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
+func getModuleDeps(modName string, modDir string) ([]string, error) {
 	var deps []string
 
-	// split the module name on - and/or _, build a regex for matching
-	splitRe := regexp.MustCompile("[-_]+")
-	modNameReStr := splitRe.ReplaceAllString(modName, "[-_]+")
-	re := regexp.MustCompile("^" + modNameReStr + "$")
+	modDep := filepath.Join(modDir, "modules.dep")
+	if !exists(modDep) {
+		log.Fatal("Kernel module.dep not found: ", modDir)
+	}
 
-	s := bufio.NewScanner(modulesDep)
+	fd, err := os.Open(modDep)
+	if err != nil {
+		log.Print("Unable to open modules.dep: ", modDep)
+		return deps, err
+	}
+
+	defer fd.Close()
+	s := bufio.NewScanner(fd)
 	for s.Scan() {
 		fields := strings.Fields(s.Text())
-		if len(fields) == 0 {
+		fields[0] = strings.TrimSuffix(fields[0], ":")
+		if modName != filepath.Base(stripExts(fields[0])) {
 			continue
 		}
-		fields[0] = strings.TrimSuffix(fields[0], ":")
-
-		found := re.FindAll([]byte(filepath.Base(stripExts(fields[0]))), -1)
-		if len(found) > 0 {
-			deps = append(deps, fields...)
-			break
+		for _, modPath := range fields {
+			deps = append(deps, modPath)
 		}
 	}
 	if err := s.Err(); err != nil {
main_test.go (56 changed lines)
@@ -1,10 +1,8 @@
 // Copyright 2021 Clayton Craft <clayton@craftyguy.net>
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 package main
 
 import (
-	"strings"
 	"testing"
 )
 
@@ -26,57 +24,3 @@ func TestStripExts(t *testing.T) {
 		}
 	}
 }
-
-func stringSlicesEqual(a []string, b []string) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	for i, v := range a {
-		if v != b[i] {
-			return false
-		}
-	}
-	return true
-}
-
-var testModuleDep string = `
-kernel/sound/soc/codecs/snd-soc-msm8916-digital.ko:
-kernel/net/sched/act_ipt.ko.xz: kernel/net/netfilter/x_tables.ko.xz
-kernel/drivers/watchdog/watchdog.ko.xz:
-kernel/drivers/usb/serial/ir-usb.ko.xz: kernel/drivers/usb/serial/usbserial.ko.xz
-kernel/drivers/gpu/drm/scheduler/gpu-sched.ko.xz:
-kernel/drivers/hid/hid-alps.ko.xz:
-kernel/net/netfilter/xt_u32.ko.xz: kernel/net/netfilter/x_tables.ko.xz
-kernel/net/netfilter/xt_sctp.ko.xz: kernel/net/netfilter/x_tables.ko.xz
-kernel/drivers/hwmon/gl518sm.ko.xz:
-kernel/drivers/watchdog/dw_wdt.ko.xz: kernel/drivers/watchdog/watchdog.ko.xz
-kernel/net/bluetooth/hidp/hidp.ko.xz: kernel/net/bluetooth/bluetooth.ko.xz kernel/net/rfkill/rfkill.ko.xz kernel/crypto/ecdh_generic.ko.xz kernel/crypto/ecc.ko.xz
-kernel/fs/nls/nls_iso8859-1.ko.xz:
-kernel/net/vmw_vsock/vmw_vsock_virtio_transport.ko.xz: kernel/net/vmw_vsock/vmw_vsock_virtio_transport_common.ko.xz kernel/drivers/virtio/virtio.ko.xz kernel/drivers/virtio/virtio_ring.ko.xz kernel/net/vmw_vsock/vsock.ko.xz
-kernel/drivers/gpu/drm/panfrost/panfrost.ko.xz: kernel/drivers/gpu/drm/scheduler/gpu-sched.ko.xz
-kernel/drivers/gpu/drm/msm/msm.ko: kernel/drivers/gpu/drm/drm_kms_helper.ko
-`
-
-func TestGetModuleDeps(t *testing.T) {
-	tables := []struct {
-		in       string
-		expected []string
-	}{
-		{"nls-iso8859-1", []string{"kernel/fs/nls/nls_iso8859-1.ko.xz"}},
-		{"gpu_sched", []string{"kernel/drivers/gpu/drm/scheduler/gpu-sched.ko.xz"}},
-		{"dw-wdt", []string{"kernel/drivers/watchdog/dw_wdt.ko.xz",
-			"kernel/drivers/watchdog/watchdog.ko.xz"}},
-		{"gl518sm", []string{"kernel/drivers/hwmon/gl518sm.ko.xz"}},
-		{"msm", []string{"kernel/drivers/gpu/drm/msm/msm.ko",
-			"kernel/drivers/gpu/drm/drm_kms_helper.ko"}},
-	}
-	for _, table := range tables {
-		out, err := getModuleDeps(table.in, strings.NewReader(testModuleDep))
-		if err != nil {
-			t.Errorf("unexpected error with input: %q, error: %q", table.expected, err)
-		}
-		if !stringSlicesEqual(out, table.expected) {
-			t.Errorf("Expected: %q, got: %q", table.expected, out)
-		}
-	}
-}
@@ -1,28 +1,26 @@
|
|||||||
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
|
||||||
// SPDX-License-Identifier: GPL-3.0-or-later
|
// SPDX-License-Identifier: GPL-3.0-or-later
|
||||||
|
|
||||||
package archive
|
package archive
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/flate"
|
"compress/flate"
|
||||||
"fmt"
|
"crypto/sha256"
|
||||||
"io"
|
"encoding/hex"
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/cavaliercoder/go-cpio"
|
"github.com/cavaliercoder/go-cpio"
|
||||||
"github.com/klauspost/pgzip"
|
"github.com/klauspost/pgzip"
|
||||||
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
|
"gitlab.com/postmarketOS/postmarketos-mkinitfs/pkgs/misc"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Archive struct {
|
type Archive struct {
|
||||||
items archiveItems
|
Dirs misc.StringSet
|
||||||
|
Files misc.StringSet
|
||||||
cpioWriter *cpio.Writer
|
cpioWriter *cpio.Writer
|
||||||
buf *bytes.Buffer
|
buf *bytes.Buffer
|
||||||
}
|
}
|
||||||
@@ -31,184 +29,166 @@ func New() (*Archive, error) {
 	buf := new(bytes.Buffer)
 	archive := &Archive{
 		cpioWriter: cpio.NewWriter(buf),
+		Files:      make(misc.StringSet),
+		Dirs:       make(misc.StringSet),
 		buf:        buf,
 	}
 
 	return archive, nil
 }
 
-type archiveItem struct {
-	sourcePath string
-	header     *cpio.Header
-}
-
-type archiveItems struct {
-	items []archiveItem
-	sync.RWMutex
-}
-
-// Adds the given item to the archiveItems, only if it doesn't already exist in
-// the list. The items are kept sorted in ascending order.
-func (a *archiveItems) Add(item archiveItem) {
-	a.Lock()
-	defer a.Unlock()
-
-	if len(a.items) < 1 {
-		// empty list
-		a.items = append(a.items, item)
-		return
-	}
-
-	// find existing item, or index of where new item should go
-	i := sort.Search(len(a.items), func(i int) bool {
-		return strings.Compare(item.header.Name, a.items[i].header.Name) <= 0
-	})
-
-	if i >= len(a.items) {
-		// doesn't exist in list, but would be at the very end
-		a.items = append(a.items, item)
-		return
-	}
-
-	if strings.Compare(a.items[i].header.Name, item.header.Name) == 0 {
-		// already in list
-		return
-	}
-
-	// grow list by 1, shift right at index, and insert new string at index
-	a.items = append(a.items, archiveItem{})
-	copy(a.items[i+1:], a.items[i:])
-	a.items[i] = item
-}
-
-// iterate through items and send each one over the returned channel
-func (a *archiveItems) IterItems() <-chan archiveItem {
-	ch := make(chan archiveItem)
-	go func() {
-		a.RLock()
-		defer a.RUnlock()
-
-		for _, item := range a.items {
-			ch <- item
-		}
-		close(ch)
-	}()
-	return ch
-}
-
 func (archive *Archive) Write(path string, mode os.FileMode) error {
 	if err := archive.writeCpio(); err != nil {
 		return err
 	}
 
 	if err := archive.cpioWriter.Close(); err != nil {
-		return fmt.Errorf("archive.Write: error closing archive: %w", err)
+		return err
 	}
 
 	// Write archive to path
 	if err := archive.writeCompressed(path, mode); err != nil {
-		return fmt.Errorf("unable to write archive to location %q: %w", path, err)
+		log.Print("Unable to write archive to location: ", path)
+		return err
+	}
+
+	// test the archive to make sure it's valid
+	if err := test(path); err != nil {
+		log.Print("Verification of archive failed!")
+		return err
 	}
 
 	if err := os.Chmod(path, mode); err != nil {
-		return fmt.Errorf("unable to chmod %q to %s: %w", path, mode, err)
+		return err
 	}
 
 	return nil
 }
 
-// Adds the given items in the map to the archive. The map format is {source path:dest path}.
-// Internally this just calls AddItem on each key,value pair in the map.
-func (archive *Archive) AddItems(paths map[string]string) error {
-	for s, d := range paths {
-		if err := archive.AddItem(s, d); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Adds the given file or directory at "source" to the archive at "dest"
-func (archive *Archive) AddItem(source string, dest string) error {
-	sourceStat, err := os.Lstat(source)
-	if err != nil {
-		e, ok := err.(*os.PathError)
-		if e.Err == syscall.ENOENT && ok {
-			// doesn't exist in current filesystem, assume it's a new directory
-			return archive.addDir(dest)
-		}
-		return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
-	}
-
-	if sourceStat.Mode()&os.ModeDir != 0 {
-		return archive.addDir(dest)
-	}
-
-	return archive.addFile(source, dest)
-}
-
-func (archive *Archive) addFile(source string, dest string) error {
+func checksum(path string) (string, error) {
+	var sum string
+
+	buf := make([]byte, 64*1024)
+	sha256 := sha256.New()
+	fd, err := os.Open(path)
+	defer fd.Close()
+
+	if err != nil {
+		log.Print("Unable to checksum: ", path)
+		return sum, err
+	}
+
+	// Read file in chunks
+	for {
+		bytes, err := fd.Read(buf)
+		if bytes > 0 {
+			_, err := sha256.Write(buf[:bytes])
+			if err != nil {
+				log.Print("Unable to checksum: ", path)
+				return sum, err
+			}
+		}
+
+		if err == io.EOF {
+			break
+		}
+	}
+	sum = hex.EncodeToString(sha256.Sum(nil))
+	return sum, nil
+}
+
+func (archive *Archive) AddFile(file string, dest string) error {
 	if err := archive.addDir(filepath.Dir(dest)); err != nil {
 		return err
 	}
 
-	sourceStat, err := os.Lstat(source)
+	if archive.Files[file] {
+		// Already written to cpio
+		return nil
+	}
+
+	fileStat, err := os.Lstat(file)
 	if err != nil {
-		log.Print("addFile: failed to stat file: ", source)
+		log.Print("AddFile: failed to stat file: ", file)
 		return err
 	}
 
 	// Symlink: write symlink to archive then set 'file' to link target
-	if sourceStat.Mode()&os.ModeSymlink != 0 {
+	if fileStat.Mode()&os.ModeSymlink != 0 {
 		// log.Printf("File %q is a symlink", file)
-		target, err := os.Readlink(source)
+		target, err := os.Readlink(file)
 		if err != nil {
-			log.Print("addFile: failed to get symlink target: ", source)
+			log.Print("AddFile: failed to get symlink target: ", file)
 			return err
 		}
 
 		destFilename := strings.TrimPrefix(dest, "/")
+		hdr := &cpio.Header{
+			Name:     destFilename,
+			Linkname: target,
+			Mode:     0644 | cpio.ModeSymlink,
+			Size:     int64(len(target)),
+			// Checksum: 1,
+		}
+		if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err = archive.cpioWriter.Write([]byte(target)); err != nil {
+			return err
+		}
 
-		archive.items.Add(archiveItem{
-			sourcePath: source,
-			header: &cpio.Header{
-				Name:     destFilename,
-				Linkname: target,
-				Mode:     0644 | cpio.ModeSymlink,
-				Size:     int64(len(target)),
-				// Checksum: 1,
-			},
-		})
+		archive.Files[file] = true
 
 		if filepath.Dir(target) == "." {
-			target = filepath.Join(filepath.Dir(source), target)
+			target = filepath.Join(filepath.Dir(file), target)
 		}
 		// make sure target is an absolute path
 		if !filepath.IsAbs(target) {
-			target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
-			if err != nil {
-				return err
-			}
+			target, err = misc.RelativeSymlinkTargetToDir(target, filepath.Dir(file))
 		}
 		// TODO: add verbose mode, print stuff like this:
 		// log.Printf("symlink: %q, target: %q", file, target)
 		// write symlink target
-		err = archive.addFile(target, target)
+		err = archive.AddFile(target, target)
 		return err
 	}
 
-	destFilename := strings.TrimPrefix(dest, "/")
+	// log.Printf("writing file: %q", file)
 
-	archive.items.Add(archiveItem{
-		sourcePath: source,
-		header: &cpio.Header{
-			Name: destFilename,
-			Mode: cpio.FileMode(sourceStat.Mode().Perm()),
-			Size: sourceStat.Size(),
-			// Checksum: 1,
-		},
-	})
+	fd, err := os.Open(file)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	destFilename := strings.TrimPrefix(dest, "/")
+	hdr := &cpio.Header{
+		Name: destFilename,
+		Mode: cpio.FileMode(fileStat.Mode().Perm()),
+		Size: fileStat.Size(),
+		// Checksum: 1,
+	}
+	if err := archive.cpioWriter.WriteHeader(hdr); err != nil {
+		return err
+	}
+
+	if _, err = io.Copy(archive.cpioWriter, fd); err != nil {
+		return err
+	}
+
+	archive.Files[file] = true
+
+	return nil
+}
+
+// Use busybox gzip to test archive
+func test(path string) error {
+	cmd := exec.Command("busybox", "gzip", "-t", path)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		log.Print("'boot-deploy' command failed: ")
+		return err
+	}
 
 	return nil
 }
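The removed archiveItems.Add above keeps entries ordered by cpio header name using sort.Search plus an append-and-copy shift, so duplicates can be rejected at insertion time instead of needing a separate set. As a rough standalone illustration of that pattern only (plain strings stand in for the repository's archiveItem type):

package main

import (
	"fmt"
	"sort"
)

// insertSorted inserts name into names, keeping the slice sorted in ascending
// order and skipping names that are already present. This mirrors the
// sort.Search + copy pattern used by the removed archiveItems.Add.
func insertSorted(names []string, name string) []string {
	i := sort.Search(len(names), func(i int) bool {
		return names[i] >= name
	})
	if i < len(names) && names[i] == name {
		return names // already present
	}
	names = append(names, "")    // grow list by 1
	copy(names[i+1:], names[i:]) // shift right at the insertion index
	names[i] = name
	return names
}

func main() {
	s := []string{"etc/fstab", "usr/bin/busybox"}
	s = insertSorted(s, "lib/firmware/foo.bin")
	s = insertSorted(s, "etc/fstab") // duplicate: no change
	fmt.Println(s)
}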
@@ -246,48 +226,29 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) error {
 }
 
 func (archive *Archive) writeCpio() error {
-	// having a transient function for actually adding files to the archive
-	// allows the deferred fd.close to run after every copy and prevent having
-	// tons of open file handles until the copying is all done
-	copyToArchive := func(source string, header *cpio.Header) error {
-		if err := archive.cpioWriter.WriteHeader(header); err != nil {
-			return fmt.Errorf("archive.writeCpio: unable to write header: %w", err)
-		}
-
-		// don't copy actual dirs into the archive, writing the header is enough
-		if !header.Mode.IsDir() {
-			if header.Mode.IsRegular() {
-				fd, err := os.Open(source)
-				if err != nil {
-					return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
-				}
-				defer fd.Close()
-				if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
-					return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
-				}
-			} else if header.Linkname != "" {
-				// the contents of a symlink is just need the link name
-				if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
-					return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
-				}
-			} else {
-				return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
-			}
-		}
-
-		return nil
+	// Write any dirs added explicitly
+	for dir := range archive.Dirs {
+		archive.addDir(dir)
 	}
 
-	for i := range archive.items.IterItems() {
-		if err := copyToArchive(i.sourcePath, i.header); err != nil {
+	// Write files and any missing parent dirs
+	for file, imported := range archive.Files {
+		if imported {
+			continue
+		}
+		if err := archive.AddFile(file, file); err != nil {
 			return err
 		}
 	}
 
 	return nil
 }
 
 func (archive *Archive) addDir(dir string) error {
+	if archive.Dirs[dir] {
+		// Already imported
+		return nil
+	}
 	if dir == "/" {
 		dir = "."
 	}
@@ -295,13 +256,19 @@ func (archive *Archive) addDir(dir string) error {
 	subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
 	for i, subdir := range subdirs {
 		path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
-		archive.items.Add(archiveItem{
-			sourcePath: path,
-			header: &cpio.Header{
-				Name: path,
-				Mode: cpio.ModeDir | 0755,
-			},
+		if archive.Dirs[path] {
+			// Subdir already imported
+			continue
+		}
+		err := archive.cpioWriter.WriteHeader(&cpio.Header{
+			Name: path,
+			Mode: cpio.ModeDir | 0755,
 		})
+		if err != nil {
+			return err
+		}
+		archive.Dirs[path] = true
+		// log.Print("wrote dir: ", path)
 	}
 
 	return nil
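Both the old and new addDir shown above split a directory path into its components and emit one cpio entry per ancestor, so parent directories always precede their contents in the archive. The throwaway main below (not part of the repository) just exercises that path-splitting step in isolation:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// For "/usr/share/fonts" this prints "usr", "usr/share", "usr/share/fonts",
// the same cumulative walk addDir performs before writing each dir header.
func main() {
	dir := "/usr/share/fonts"
	subdirs := strings.Split(strings.TrimPrefix(dir, "/"), "/")
	for i, subdir := range subdirs {
		path := filepath.Join(strings.Join(subdirs[:i], "/"), subdir)
		fmt.Println(path)
	}
}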
@@ -1,189 +0,0 @@
-// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package archive
-
-import (
-	"reflect"
-	"testing"
-
-	"github.com/cavaliercoder/go-cpio"
-)
-
-func TestArchiveItemsAdd(t *testing.T) {
-	subtests := []struct {
-		name     string
-		inItems  []archiveItem
-		inItem   archiveItem
-		expected []archiveItem
-	}{
-		{
-			name:    "empty list",
-			inItems: []archiveItem{},
-			inItem: archiveItem{
-				sourcePath: "/foo/bar",
-				header:     &cpio.Header{Name: "/foo/bar"},
-			},
-			expected: []archiveItem{
-				{
-					sourcePath: "/foo/bar",
-					header:     &cpio.Header{Name: "/foo/bar"},
-				},
-			},
-		},
-		{
-			name: "already exists",
-			inItems: []archiveItem{
-				{
-					sourcePath: "/bazz/bar",
-					header:     &cpio.Header{Name: "/bazz/bar"},
-				},
-				{
-					sourcePath: "/foo",
-					header:     &cpio.Header{Name: "/foo"},
-				},
-				{
-					sourcePath: "/foo/bar",
-					header:     &cpio.Header{Name: "/foo/bar"},
-				},
-			},
-			inItem: archiveItem{
-				sourcePath: "/foo",
-				header:     &cpio.Header{Name: "/foo"},
-			},
-			expected: []archiveItem{
-				{
-					sourcePath: "/bazz/bar",
-					header:     &cpio.Header{Name: "/bazz/bar"},
-				},
-				{
-					sourcePath: "/foo",
-					header:     &cpio.Header{Name: "/foo"},
-				},
-				{
-					sourcePath: "/foo/bar",
-					header:     &cpio.Header{Name: "/foo/bar"},
-				},
-			},
-		},
-		{
-			name: "add new",
-			inItems: []archiveItem{
-				{
-					sourcePath: "/bazz/bar",
-					header:     &cpio.Header{Name: "/bazz/bar"},
-				},
-				{
-					sourcePath: "/foo",
-					header:     &cpio.Header{Name: "/foo"},
-				},
-				{
-					sourcePath: "/foo/bar",
-					header:     &cpio.Header{Name: "/foo/bar"},
-				},
-				{
-					sourcePath: "/foo/bar1",
-					header:     &cpio.Header{Name: "/foo/bar1"},
-				},
-			},
-			inItem: archiveItem{
-				sourcePath: "/foo/bar0",
-				header:     &cpio.Header{Name: "/foo/bar0"},
-			},
-			expected: []archiveItem{
-				{
-					sourcePath: "/bazz/bar",
-					header:     &cpio.Header{Name: "/bazz/bar"},
-				},
-				{
-					sourcePath: "/foo",
-					header:     &cpio.Header{Name: "/foo"},
-				},
-				{
-					sourcePath: "/foo/bar",
-					header:     &cpio.Header{Name: "/foo/bar"},
-				},
-				{
-					sourcePath: "/foo/bar0",
-					header:     &cpio.Header{Name: "/foo/bar0"},
-				},
-				{
-					sourcePath: "/foo/bar1",
-					header:     &cpio.Header{Name: "/foo/bar1"},
-				},
-			},
-		},
-		{
-			name: "add new at beginning",
-			inItems: []archiveItem{
-				{
-					sourcePath: "/foo",
-					header:     &cpio.Header{Name: "/foo"},
-				},
-				{
-					sourcePath: "/foo/bar",
-					header:     &cpio.Header{Name: "/foo/bar"},
-				},
-			},
-			inItem: archiveItem{
-				sourcePath: "/bazz/bar",
-				header:     &cpio.Header{Name: "/bazz/bar"},
-			},
-			expected: []archiveItem{
-				{
-					sourcePath: "/bazz/bar",
-					header:     &cpio.Header{Name: "/bazz/bar"},
-				},
-				{
-					sourcePath: "/foo",
-					header:     &cpio.Header{Name: "/foo"},
-				},
-				{
-					sourcePath: "/foo/bar",
-					header:     &cpio.Header{Name: "/foo/bar"},
-				},
-			},
-		},
-		{
-			name: "add new at end",
-			inItems: []archiveItem{
-				{
-					sourcePath: "/bazz/bar",
-					header:     &cpio.Header{Name: "/bazz/bar"},
-				},
-				{
-					sourcePath: "/foo",
-					header:     &cpio.Header{Name: "/foo"},
-				},
-			},
-			inItem: archiveItem{
-				sourcePath: "/zzz/bazz",
-				header:     &cpio.Header{Name: "/zzz/bazz"},
-			},
-			expected: []archiveItem{
-				{
-					sourcePath: "/bazz/bar",
-					header:     &cpio.Header{Name: "/bazz/bar"},
-				},
-				{
-					sourcePath: "/foo",
-					header:     &cpio.Header{Name: "/foo"},
-				},
-				{
-					sourcePath: "/zzz/bazz",
-					header:     &cpio.Header{Name: "/zzz/bazz"},
-				},
-			},
-		},
-	}
-
-	for _, st := range subtests {
-		t.Run(st.name, func(t *testing.T) {
-			a := archiveItems{items: st.inItems}
-			a.Add(st.inItem)
-			if !reflect.DeepEqual(st.expected, a.items) {
-				t.Fatal("expected:", st.expected, " got: ", a.items)
-			}
-		})
-	}
-}
@@ -1,130 +1,53 @@
 // Copyright 2021 Clayton Craft <clayton@craftyguy.net>
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 package deviceinfo
 
 import (
-	"bufio"
-	"fmt"
-	"io"
-	"log"
+	"errors"
+	"github.com/BurntSushi/toml"
 	"os"
-	"reflect"
-	"strings"
 )
 
+// Note: fields must be exported (start with capital letter)
+// https://github.com/BurntSushi/toml/issues/121
 type DeviceInfo struct {
-	AppendDtb                     string
-	Arch                          string
-	UbootBoardname                string
-	BootimgAppendSEAndroidEnforce string
-	BootimgBlobpack               string
-	BootimgDtbSecond              string
-	BootimgMtkMkimage             string
-	BootimgPxa                    string
-	BootimgQcdt                   string
-	Dtb                           string
-	FlashKernelOnUpdate           string
-	FlashOffsetBase               string
-	FlashOffsetKernel             string
-	FlashOffsetRamdisk            string
-	FlashOffsetSecond             string
-	FlashOffsetTags               string
-	FlashPagesize                 string
-	GenerateBootimg               string
-	GenerateLegacyUbootInitfs     string
-	InitfsCompression             string
-	KernelCmdline                 string
-	LegacyUbootLoadAddress        string
-	MesaDriver                    string
-	MkinitfsPostprocess           string
-	ModulesInitfs                 string
+	Deviceinfo_append_dtb                      string
+	Deviceinfo_arch                            string
+	Deviceinfo_bootimg_append_seandroidenforce string
+	Deviceinfo_bootimg_blobpack                string
+	Deviceinfo_bootimg_dtb_second              string
+	Deviceinfo_bootimg_mtk_mkimage             string
+	Deviceinfo_bootimg_pxa                     string
+	Deviceinfo_bootimg_qcdt                    string
+	Deviceinfo_dtb                             string
+	Deviceinfo_flash_offset_base               string
+	Deviceinfo_flash_offset_kernel             string
+	Deviceinfo_flash_offset_ramdisk            string
+	Deviceinfo_flash_offset_second             string
+	Deviceinfo_flash_offset_tags               string
+	Deviceinfo_flash_pagesize                  string
+	Deviceinfo_generate_bootimg                string
+	Deviceinfo_generate_legacy_uboot_initfs    string
+	Deviceinfo_mesa_driver                     string
+	Deviceinfo_mkinitfs_postprocess            string
+	Deviceinfo_initfs_compression              string
+	Deviceinfo_kernel_cmdline                  string
+	Deviceinfo_legacy_uboot_load_address       string
+	Deviceinfo_modules_initfs                  string
+	Deviceinfo_flash_kernel_on_update          string
 }
 
-func ReadDeviceinfo(file string) (DeviceInfo, error) {
+func ReadDeviceinfo() (DeviceInfo, error) {
+	file := "/etc/deviceinfo"
 	var deviceinfo DeviceInfo
 
-	fd, err := os.Open(file)
+	_, err := os.Stat(file)
 	if err != nil {
-		return deviceinfo, err
-	}
-	defer fd.Close()
-
-	if err := unmarshal(fd, &deviceinfo); err != nil {
-		return deviceinfo, err
+		return deviceinfo, errors.New("Unable to find deviceinfo: " + file)
 	}
 
+	if _, err := toml.DecodeFile(file, &deviceinfo); err != nil {
+		return deviceinfo, err
+	}
 	return deviceinfo, nil
 }
-
-// Unmarshals a deviceinfo into a DeviceInfo struct
-func unmarshal(r io.Reader, devinfo *DeviceInfo) error {
-	s := bufio.NewScanner(r)
-	for s.Scan() {
-		line := s.Text()
-		if strings.HasPrefix(line, "#") {
-			continue
-		}
-
-		// line isn't setting anything, so just ignore it
-		if !strings.Contains(line, "=") {
-			continue
-		}
-
-		// sometimes line has a comment at the end after setting an option
-		line = strings.SplitN(line, "#", 2)[0]
-		line = strings.TrimSpace(line)
-
-		// must support having '=' in the value (e.g. kernel cmdline)
-		parts := strings.SplitN(line, "=", 2)
-		if len(parts) != 2 {
-			return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
-		}
-
-		name, val := parts[0], parts[1]
-		val = strings.ReplaceAll(val, "\"", "")
-
-		if name == "deviceinfo_format_version" && val != "0" {
-			return fmt.Errorf("deviceinfo format version %q is not supported", val)
-		}
-
-		fieldName := nameToField(name)
-
-		if fieldName == "" {
-			return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
-		}
-
-		field := reflect.ValueOf(devinfo).Elem().FieldByName(fieldName)
-		if !field.IsValid() {
-			// an option that meets the deviceinfo "specification", but isn't
-			// one we care about in this module
-			continue
-		}
-		field.SetString(val)
-	}
-	if err := s.Err(); err != nil {
-		log.Print("unable to parse deviceinfo: ", err)
-		return err
-	}
-
-	return nil
-}
-
-// Convert string into the string format used for DeviceInfo fields.
-// Note: does not test that the resulting field name is a valid field in the
-// DeviceInfo struct!
-func nameToField(name string) string {
-	var field string
-	parts := strings.Split(name, "_")
-	for _, p := range parts {
-		if p == "deviceinfo" {
-			continue
-		}
-		if len(p) < 1 {
-			continue
-		}
-		field = field + strings.ToUpper(p[:1]) + p[1:]
-	}
-
-	return field
-}
@@ -1,81 +0,0 @@
-// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package deviceinfo
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"testing"
-)
-
-// Test conversion of name to DeviceInfo struct field format
-func TestNameToField(t *testing.T) {
-	tables := []struct {
-		in       string
-		expected string
-	}{
-		{"deviceinfo_dtb", "Dtb"},
-		{"dtb", "Dtb"},
-		{"deviceinfo_modules_initfs", "ModulesInitfs"},
-		{"modules_initfs", "ModulesInitfs"},
-		{"deviceinfo_modules_initfs___", "ModulesInitfs"},
-	}
-
-	for _, table := range tables {
-		out := nameToField(table.in)
-		if out != table.expected {
-			t.Errorf("expected: %q, got: %q", table.expected, out)
-		}
-	}
-}
-
-// Test unmarshalling with lines in deviceinfo
-func TestUnmarshal(t *testing.T) {
-	tables := []struct {
-		// field is just used for reflection within the test, so it must be a
-		// valid DeviceInfo field
-		field    string
-		in       string
-		expected string
-	}{
-		{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"\n", "panfrost foo bar bazz"},
-		{"ModulesInitfs", "deviceinfo_modules_initfs=\"panfrost foo bar bazz\"", "panfrost foo bar bazz"},
-		// line with multiple '='
-		{"KernelCmdline",
-			"deviceinfo_kernel_cmdline=\"PMOS_NO_OUTPUT_REDIRECT fw_devlink=off nvme_core.default_ps_max_latency_us=5500 pcie_aspm.policy=performance\"\n",
-			"PMOS_NO_OUTPUT_REDIRECT fw_devlink=off nvme_core.default_ps_max_latency_us=5500 pcie_aspm.policy=performance"},
-		// empty option
-		{"ModulesInitfs", "deviceinfo_modules_initfs=\"\"\n", ""},
-		{"Dtb", "deviceinfo_dtb=\"freescale/imx8mq-librem5-r2 freescale/imx8mq-librem5-r3 freescale/imx8mq-librem5-r4\"\n",
-			"freescale/imx8mq-librem5-r2 freescale/imx8mq-librem5-r3 freescale/imx8mq-librem5-r4"},
-		// valid deviceinfo line, just not used in this module
-		{"", "deviceinfo_codename=\"pine64-pinebookpro\"", ""},
-		// line with comment at the end
-		{"MesaDriver", "deviceinfo_mesa_driver=\"panfrost\" # this is a nice driver", "panfrost"},
-		{"", "# this is a comment!\n", ""},
-		// empty lines are fine
-		{"", "", ""},
-		// line with whitepace characters only
-		{"", " \t \n\r", ""},
-	}
-	var d DeviceInfo
-	for _, table := range tables {
-		testName := fmt.Sprintf("unmarshal::'%s':", strings.ReplaceAll(table.in, "\n", "\\n"))
-		if err := unmarshal(strings.NewReader(table.in), &d); err != nil {
-			t.Errorf("%s received an unexpected err: ", err)
-		}
-
-		// Check against expected value
-		field := reflect.ValueOf(&d).Elem().FieldByName(table.field)
-		out := ""
-		if table.field != "" {
-			out = field.String()
-		}
-		if out != table.expected {
-			t.Errorf("%s expected: %q, got: %q", testName, table.expected, out)
-		}
-	}
-
-}
@@ -1,6 +1,5 @@
-// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
+// Copyright 2021 Clayton Craft <clayton@craftyguy.net>
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 package misc
 
 import (
@@ -10,6 +9,8 @@ import (
 	"path/filepath"
 )
 
+type StringSet map[string]bool
+
 // Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
 // absolute path
 func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
@@ -46,31 +47,3 @@ func FreeSpace(path string) (uint64, error) {
 	size := stat.Bavail * uint64(stat.Bsize)
 	return size, nil
 }
-
-// Merge the contents of "b" into "a", overwriting any previously existing keys
-// in "a"
-func Merge(a map[string]string, b map[string]string) {
-	for k, v := range b {
-		a[k] = v
-	}
-}
-
-// Removes duplicate entries from the given string slice and returns a slice
-// with the unique values
-func RemoveDuplicates(in []string) (out []string) {
-	// use a map to "remove" duplicates. the value in the map is totally
-	// irrelevant
-	outMap := make(map[string]bool)
-	for _, s := range in {
-		if ok := outMap[s]; !ok {
-			outMap[s] = true
-		}
-	}
-
-	out = make([]string, 0, len(outMap))
-	for k := range outMap {
-		out = append(out, k)
-	}
-
-	return
-}
@@ -1,125 +0,0 @@
-// Copyright 2022 Clayton Craft <clayton@craftyguy.net>
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package misc
-
-import (
-	"reflect"
-	"sort"
-	"testing"
-)
-
-func TestMerge(t *testing.T) {
-	subtests := []struct {
-		name     string
-		inA      map[string]string
-		inB      map[string]string
-		expected map[string]string
-	}{
-		{
-			name: "empty B",
-			inA: map[string]string{
-				"foo":    "bar",
-				"banana": "airplane",
-			},
-			inB: map[string]string{},
-			expected: map[string]string{
-				"foo":    "bar",
-				"banana": "airplane",
-			},
-		},
-		{
-			name: "empty A",
-			inA:  map[string]string{},
-			inB: map[string]string{
-				"foo":    "bar",
-				"banana": "airplane",
-			},
-			expected: map[string]string{
-				"foo":    "bar",
-				"banana": "airplane",
-			},
-		},
-		{
-			name: "both populated, some duplicates",
-			inA: map[string]string{
-				"bar":    "bazz",
-				"banana": "yellow",
-				"guava":  "green",
-			},
-			inB: map[string]string{
-				"foo":    "bar",
-				"banana": "airplane",
-			},
-			expected: map[string]string{
-				"foo":    "bar",
-				"guava":  "green",
-				"banana": "airplane",
-				"bar":    "bazz",
-			},
-		},
-	}
-
-	for _, st := range subtests {
-		t.Run(st.name, func(t *testing.T) {
-			out := st.inA
-			Merge(out, st.inB)
-			if !reflect.DeepEqual(st.expected, out) {
-				t.Fatalf("expected: %q, got: %q\n", st.expected, out)
-			}
-		})
-	}
-}
-
-func TestRemoveDuplicates(t *testing.T) {
-	subtests := []struct {
-		name     string
-		in       []string
-		expected []string
-	}{
-		{
-			name: "no duplicates",
-			in: []string{
-				"foo",
-				"bar",
-				"banana",
-				"airplane",
-			},
-			expected: []string{
-				"foo",
-				"bar",
-				"banana",
-				"airplane",
-			},
-		},
-		{
-			name: "all duplicates",
-			in: []string{
-				"foo",
-				"foo",
-				"foo",
-				"foo",
-			},
-			expected: []string{
-				"foo",
-			},
-		},
-		{
-			name:     "empty",
-			in:       []string{},
-			expected: []string{},
-		},
-	}
-
-	for _, st := range subtests {
-		t.Run(st.name, func(t *testing.T) {
-			// note: sorting to make comparison easier later
-			sort.Strings(st.expected)
-			out := RemoveDuplicates(st.in)
-			sort.Strings(out)
-			if !reflect.DeepEqual(st.expected, out) {
-				t.Fatalf("expected: %q, got: %q\n", st.expected, out)
-			}
-		})
-	}
-}
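The removed RemoveDuplicates helper above dedupes a string slice by funneling the values through a map, and the removed test sorts both slices because map iteration order is random. A minimal standalone version of the same idea, for reference only (the main function and sample module names are invented for illustration):

package main

import (
	"fmt"
	"sort"
)

// removeDuplicates mirrors the deleted misc.RemoveDuplicates: collect the
// strings as map keys, then copy the unique keys back out into a slice.
func removeDuplicates(in []string) []string {
	seen := make(map[string]bool)
	for _, s := range in {
		seen[s] = true
	}
	out := make([]string, 0, len(seen))
	for k := range seen {
		out = append(out, k)
	}
	// sort for a stable result, the same workaround the deleted test uses
	sort.Strings(out)
	return out
}

func main() {
	fmt.Println(removeDuplicates([]string{"panfrost", "msm", "panfrost"}))
}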