Compare commits

27 commits (author and date columns not captured):

f6e4773507
7a07a16ecb
4f6af31a7a
39ee6752fd
0edee0afbd
95edf678f4
be6a6da417
4e771ab96f
4d7dd79bcf
d63e600614
741c0553d5
cd97df108a
1fed057a82
5efdb9f170
81de8b438d
af9a0f0ca5
014563fdbc
83282187c2
eda4f3ba22
866d37b85d
1334fdfa26
56db822b88
631d6078c2
e5f14d70a6
dd5cdeace5
1a99953aa2
e2f4e6254f
.gitlab-ci.yml (120 changed lines)

@@ -6,43 +6,123 @@ image: alpine:edge
 variables:
   GOFLAGS: "-buildvcs=false"
   PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/mkinitfs-vendor-${CI_COMMIT_TAG}/${CI_COMMIT_TAG}"
+  CI_TRON_TEMPLATE_PROJECT: &ci-tron-template-project postmarketOS/ci-common
+  CI_TRON_JOB_TEMPLATE_PROJECT_URL: $CI_SERVER_URL/$CI_TRON_TEMPLATE_PROJECT
+  CI_TRON_JOB_TEMPLATE_COMMIT: &ci-tron-template-commit 7c95b5f2d53533e8722abf57c73e558168e811f3
+
+include:
+  - project: *ci-tron-template-project
+    ref: *ci-tron-template-commit
+    file: '/ci-tron/common.yml'
+
 stages:
-  - lint
   - build
+  - hardware tests
   - vendor
   - release
 
-# defaults for "only"
-# We need to run the CI jobs in a "merge request specific context", if CI is
-# running in a merge request. Otherwise the environment variable that holds the
-# merge request ID is not available. This means, we must set the "only"
-# variable accordingly - and if we only do it for one job, all other jobs will
-# not get executed. So have the defaults here, and use them in all jobs that
-# should run on both the master branch, and in merge requests.
-# https://docs.gitlab.com/ee/ci/merge_request_pipelines/index.html#excluding-certain-jobs
-.only-default: &only-default
-  only:
-    - master
-    - merge_requests
-    - tags
+workflow:
+  rules:
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+    - if: $CI_COMMIT_BRANCH == 'master'
+    - if: '$CI_COMMIT_TAG != null'
 
 build:
   stage: build
-  <<: *only-default
+  variables:
+    GOTEST: "gotestsum --junitfile report.xml --format testname -- ./..."
+  parallel:
+    matrix:
+      - TAG: shared
+      - TAG: arm64
+  tags:
+    - $TAG
   before_script:
-    - apk -q add go staticcheck make scdoc
+    - apk -q add go gotestsum staticcheck make scdoc
   script:
     - make test
     - make
+  after_script:
+    - mkdir -p rootfs/usr/sbin
+    - cp mkinitfs rootfs/usr/sbin
   artifacts:
     expire_in: 1 week
+    reports:
+      junit: report.xml
+    paths:
+      - rootfs
+
+.qemu-common:
+  variables:
+    DEVICE_NAME: qemu-$CPU_ARCH
+    KERNEL_VARIANT: lts
+
+.build-ci-tron-qemu:
+  stage: hardware tests
+  extends:
+    - .pmos-ci-tron-build-boot-artifacts
+    - .qemu-common
+  variables:
+    INSTALL_PACKAGES: device-${DEVICE_NAME} device-${DEVICE_NAME}-kernel-${KERNEL_VARIANT} postmarketos-mkinitfs-hook-ci
+
+build-ci-tron-qemu-amd64:
+  extends:
+    - .build-ci-tron-qemu
+  needs:
+    - job: "build"
+      parallel:
+        matrix:
+          - TAG: shared
+  variables:
+    CPU_ARCH: amd64
+
+build-ci-tron-qemu-aarch64:
+  extends:
+    - .build-ci-tron-qemu
+  needs:
+    - job: "build"
+      parallel:
+        matrix:
+          - TAG: arm64
+  variables:
+    CPU_ARCH: aarch64
+
+.test-ci-tron-qemu:
+  stage: hardware tests
+  extends:
+    - .pmos-ci-tron-initramfs-test
+    - .qemu-common
+  dependencies: []
+  variables:
+    CI_TRON_KERNEL__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/vmlinuz-${KERNEL_VARIANT}"
+    CI_TRON_INITRAMFS__INITRAMFS__URL: "glartifact://build-ci-tron-qemu-$CPU_ARCH/${CI_TRON__PMB_EXPORT_PATH}/initramfs"
+    CI_TRON_KERNEL_CMDLINE__DEVICEINFO: 'console=tty1 console=ttyS0,115200 PMOS_FORCE_PARTITION_RESIZE'
+
+test-ci-tron-qemu-amd64:
+  extends:
+    - .test-ci-tron-qemu
+    - .pmos-ci-tron-runner-qemu-amd64
+  needs:
+    - job: 'build-ci-tron-qemu-amd64'
+      artifacts: false
+  variables:
+    CPU_ARCH: amd64
+
+test-ci-tron-qemu-aarch64:
+  extends:
+    - .test-ci-tron-qemu
+    - .pmos-ci-tron-runner-qemu-aarch64
+  needs:
+    - job: 'build-ci-tron-qemu-aarch64'
+      artifacts: false
+  variables:
+    CPU_ARCH: aarch64
+
 vendor:
   stage: vendor
   image: alpine:latest
-  only:
-    - tags
+  rules:
+    - if: '$CI_COMMIT_TAG != null'
   before_script:
     - apk -q add curl go make
   script:
@@ -54,8 +134,8 @@ vendor:
 release:
   stage: release
   image: registry.gitlab.com/gitlab-org/release-cli:latest
-  only:
-    - tags
+  rules:
+    - if: '$CI_COMMIT_TAG != null'
   script:
     - |
       release-cli create --name "Release $CI_COMMIT_TAG" --tag-name $CI_COMMIT_TAG \
Makefile (12 changed lines)

@@ -12,7 +12,13 @@ GO?=go
 GOFLAGS?=
 LDFLAGS+=-s -w -X main.Version=$(VERSION)
 RM?=rm -f
-GOTEST=go test -count=1 -race
+GOTESTOPTS?=-count=1 -race
+GOTEST?=go test ./...
+DISABLE_GOGC?=
+
+ifeq ($(DISABLE_GOGC),1)
+LDFLAGS+=-X main.DisableGC=true
+endif
 
 GOSRC!=find * -name '*.go'
 GOSRC+=go.mod go.sum
@@ -42,10 +48,10 @@ test:
 	fi
 	@staticcheck ./...
 
-	@$(GOTEST) ./...
+	$(GOTEST) $(GOTESTOPTS)
 
 clean:
 	$(RM) mkinitfs $(DOCS)
 	$(RM) $(VENDORED)*
 
 install: $(DOCS) mkinitfs
@@ -9,6 +9,8 @@ import (
 	"log"
 	"os"
 	"path/filepath"
+	"runtime/debug"
+	"strings"
 	"time"
 
 	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/archive"
@@ -26,8 +28,14 @@ import (
 
 // set at build time
 var Version string
+var DisableGC string
 
 func main() {
+	// To allow working around silly GC-related issues, like https://gitlab.com/qemu-project/qemu/-/issues/2560
+	if strings.ToLower(DisableGC) == "true" {
+		debug.SetGCPercent(-1)
+	}
+
 	retCode := 0
 	defer func() { os.Exit(retCode) }()
 
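The two hunks above, together with the Makefile change, wire a link-time switch: DISABLE_GOGC=1 adds -X main.DisableGC=true to LDFLAGS, and main() reads that string to turn the collector off. A minimal standalone sketch of the same pattern, using hypothetical package-level names rather than the mkinitfs sources themselves:

package main

import (
	"fmt"
	"runtime/debug"
	"strings"
)

// Overridden at link time, e.g.:
//   go build -ldflags "-X main.disableGC=true"
var disableGC string

func main() {
	if strings.ToLower(disableGC) == "true" {
		// A negative percentage disables the garbage collector entirely.
		debug.SetGCPercent(-1)
	}
	fmt.Println("GC disabled:", strings.ToLower(disableGC) == "true")
}

Building without the flag leaves disableGC empty, so the default behaviour is unchanged; only an explicit opt-in at build time disables GC.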
@@ -106,30 +114,6 @@ func main() {
 		modules.New("/usr/share/mkinitfs/modules"),
 		modules.New("/etc/mkinitfs/modules"),
 	})
-	if err := initramfsAr.AddItems(initfs); err != nil {
-		log.Println(err)
-		log.Println("failed to generate: ", "initramfs")
-		retCode = 1
-		return
-	}
-	if err := initramfsAr.Write(filepath.Join(workDir, "initramfs"), os.FileMode(0644)); err != nil {
-		log.Println(err)
-		log.Println("failed to generate: ", "initramfs")
-		retCode = 1
-		return
-	}
-	misc.TimeFunc(start, "initramfs")
-
-	//
-	// initramfs-extra
-	//
-	// deviceinfo.InitfsExtraCompression needs a little more post-processing
-	compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
-	log.Printf("== Generating %s ==\n", "initramfs-extra")
-	log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
-
-	start = time.Now()
-	initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
 	initfsExtra := initramfs.New([]filelist.FileLister{
 		hookfiles.New("/usr/share/mkinitfs/files-extra"),
 		hookfiles.New("/etc/mkinitfs/files-extra"),
@@ -138,19 +122,58 @@ func main() {
 		modules.New("/usr/share/mkinitfs/modules-extra"),
 		modules.New("/etc/mkinitfs/modules-extra"),
 	})
-	if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
+	if err := initramfsAr.AddItems(initfs); err != nil {
 		log.Println(err)
-		log.Println("failed to generate: ", "initramfs-extra")
+		log.Println("failed to generate: ", "initramfs")
 		retCode = 1
 		return
 	}
-	if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
+
+	// Include initramfs-extra files in the initramfs if not making a separate
+	// archive
+	if !devinfo.CreateInitfsExtra {
+		if err := initramfsAr.AddItems(initfsExtra); err != nil {
+			log.Println(err)
+			log.Println("failed to generate: ", "initramfs")
+			retCode = 1
+			return
+		}
+	}
+
+	if err := initramfsAr.Write(filepath.Join(workDir, "initramfs"), os.FileMode(0644)); err != nil {
 		log.Println(err)
-		log.Println("failed to generate: ", "initramfs-extra")
+		log.Println("failed to generate: ", "initramfs")
 		retCode = 1
 		return
 	}
-	misc.TimeFunc(start, "initramfs-extra")
+	misc.TimeFunc(start, "initramfs")
+
+	if devinfo.CreateInitfsExtra {
+		//
+		// initramfs-extra
+		//
+		// deviceinfo.InitfsExtraCompression needs a little more post-processing
+		compressionFormat, compressionLevel = archive.ExtractFormatLevel(devinfo.InitfsExtraCompression)
+		log.Printf("== Generating %s ==\n", "initramfs-extra")
+		log.Printf("- Using compression format %s with level %q\n", compressionFormat, compressionLevel)
+
+		start = time.Now()
+		initramfsExtraAr := archive.New(compressionFormat, compressionLevel)
+		if err := initramfsExtraAr.AddItemsExclude(initfsExtra, initfs); err != nil {
+			log.Println(err)
+			log.Println("failed to generate: ", "initramfs-extra")
+			retCode = 1
+			return
+		}
+		if err := initramfsExtraAr.Write(filepath.Join(workDir, "initramfs-extra"), os.FileMode(0644)); err != nil {
+			log.Println(err)
+			log.Println("failed to generate: ", "initramfs-extra")
+			retCode = 1
+			return
+		}
+		misc.TimeFunc(start, "initramfs-extra")
+	}
 
 	// Final processing of initramfs / kernel is done by boot-deploy
 	if !disableBootDeploy {
@@ -42,6 +42,7 @@ mkinitfs reads deviceinfo values from */usr/share/deviceinfo/deviceinfo* and
 */etc/deviceinfo*, in that order. The following variables
 are *required* by mkinitfs:
 
+- deviceinfo_create_initfs_extra
 - deviceinfo_generate_systemd_boot
 - deviceinfo_initfs_compression
 - deviceinfo_initfs_extra_compression
@@ -54,6 +55,36 @@ a bare minimum, and to require only variables that don't hold lists of things.
 necessary tools to extract the configured archive format are in the initramfs
 archive.
 
+# ARCHIVE COMPRESSION
+
+Archive compression parameters are specified in the
+*deviceinfo_initfs_compression* and *deviceinfo_initfs_extra_compression*
+deviceinfo variables. Their values do not have to match, but special
+consideration should be taken since some formats may require additional kernel
+options or tools in the initramfs to support it.
+
+Supported compression *formats* for mkinitfs are:
+
+- gzip
+- lz4
+- lzma
+- none
+- zstd
+
+Supported compression *levels* for mkinitfs:
+
+- best
+- default
+- fast
+
+The value of these variables follows this syntax: *<format>:<level>*. For
+example, *zstd* with the *fast* compression level would be:
+*deviceinfo_initfs_compression="zstd:fast"*
+
+Defaults to *gzip* and *default* for both archives if format and/or level is
+unsupported or omitted.
+
 # DIRECTORIES
 
 The following directories are used by mkinitfs to generate the initramfs and
@@ -116,7 +147,7 @@ create/manage. mkinitfs reads configuration from */usr/share/mkinitfs* first, an
 ## /usr/share/mkinitfs/modules, /etc/mkinitfs/modules
 ## /usr/share/mkinitfs/modules-extra, /etc/mkinitfs/modules-extra
 
-Files with the *.modules* extention in these directories are lists of
+Files with the *.modules* extension in these directories are lists of
 kernel modules to include in the initramfs. Individual modules and
 directories can be listed in the files here. Globbing is also supported.
 
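The new ARCHIVE COMPRESSION section above documents the *<format>:<level>* syntax and its gzip/default fallback; in the code this is handled by the project's ExtractFormatLevel helper. A rough illustrative sketch of how such a value could be split and validated, under the defaults the man page describes (this is not mkinitfs's actual implementation):

package main

import (
	"fmt"
	"strings"
)

// splitCompression splits "zstd:fast" into ("zstd", "fast"), falling back to
// the documented defaults when the format or level is missing or unsupported.
func splitCompression(value string) (format, level string) {
	formats := map[string]bool{"gzip": true, "lz4": true, "lzma": true, "none": true, "zstd": true}
	levels := map[string]bool{"best": true, "default": true, "fast": true}

	format, level, _ = strings.Cut(value, ":")
	if !formats[format] {
		format = "gzip"
	}
	if !levels[level] {
		level = "default"
	}
	return format, level
}

func main() {
	fmt.Println(splitCompression("zstd:fast")) // zstd fast
	fmt.Println(splitCompression(""))          // gzip default
}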
go.mod (10 changed lines)

@@ -7,5 +7,13 @@ require (
 	github.com/klauspost/compress v1.15.12
 	github.com/pierrec/lz4/v4 v4.1.17
 	github.com/ulikunitz/xz v0.5.10
-	golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
+	golang.org/x/sys v0.18.0
+)
+
+require (
+	github.com/mvdan/sh v2.6.4+incompatible // indirect
+	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/sync v0.6.0 // indirect
+	golang.org/x/term v0.18.0 // indirect
+	mvdan.cc/sh v2.6.4+incompatible // indirect
 )
go.sum (12 changed lines)

@@ -2,9 +2,21 @@ github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e h1:hHg27A0RS
 github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
 github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
 github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/mvdan/sh v2.6.4+incompatible h1:D4oEWW0J8cL7zeQkrXw76IAYXF0mJfDaBwjgzmKb6zs=
+github.com/mvdan/sh v2.6.4+incompatible/go.mod h1:kipHzrJQZEDCMTNRVRAlMMFjqHEYrthfIlFkJSrmDZE=
 github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
 github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
 github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+mvdan.cc/sh v2.6.4+incompatible h1:eD6tDeh0pw+/TOTI1BBEryZ02rD2nMcFsgcvde7jffM=
+mvdan.cc/sh v2.6.4+incompatible/go.mod h1:IeeQbZq+x2SUGBensq/jge5lLQbS3XT2ktyp3wrt4x8=
@@ -237,7 +237,10 @@ func (archive *Archive) AddItemsExclude(flister filelist.FileLister, exclude fil
 
 // Adds the given file or directory at "source" to the archive at "dest"
 func (archive *Archive) AddItem(source string, dest string) error {
+	if osutil.HasMergedUsr() {
+		source = osutil.MergeUsr(source)
+		dest = osutil.MergeUsr(dest)
+	}
 	sourceStat, err := os.Lstat(source)
 	if err != nil {
 		e, ok := err.(*os.PathError)
@@ -248,6 +251,12 @@ func (archive *Archive) AddItem(source string, dest string) error {
 		return fmt.Errorf("AddItem: failed to get stat for %q: %w", source, err)
 	}
 
+	// A symlink to a directory doesn't have the os.ModeDir bit set, so we need
+	// to check if it's a symlink first
+	if sourceStat.Mode()&os.ModeSymlink != 0 {
+		return archive.addSymlink(source, dest)
+	}
+
 	if sourceStat.Mode()&os.ModeDir != 0 {
 		return archive.addDir(dest)
 	}
@@ -255,6 +264,45 @@ func (archive *Archive) AddItem(source string, dest string) error {
 	return archive.addFile(source, dest)
 }
 
+func (archive *Archive) addSymlink(source string, dest string) error {
+	target, err := os.Readlink(source)
+	if err != nil {
+		log.Print("addSymlink: failed to get symlink target for: ", source)
+		return err
+	}
+
+	// Make sure we pick up the symlink target too
+	targetAbs := target
+	if filepath.Dir(target) == "." {
+		// relative symlink, make it absolute so we can add the target to the archive
+		targetAbs = filepath.Join(filepath.Dir(source), target)
+	}
+
+	if !filepath.IsAbs(targetAbs) {
+		targetAbs, err = osutil.RelativeSymlinkTargetToDir(targetAbs, filepath.Dir(source))
+		if err != nil {
+			return err
+		}
+	}
+
+	archive.AddItem(targetAbs, targetAbs)
+
+	// Now add the symlink itself
+	destFilename := strings.TrimPrefix(dest, "/")
+
+	archive.items.add(archiveItem{
+		sourcePath: source,
+		header: &cpio.Header{
+			Name:     destFilename,
+			Linkname: target,
+			Mode:     0644 | cpio.ModeSymlink,
+			Size:     int64(len(target)),
+		},
+	})
+
+	return nil
+}
+
 func (archive *Archive) addFile(source string, dest string) error {
 	if err := archive.addDir(filepath.Dir(dest)); err != nil {
 		return err
@@ -266,42 +314,6 @@ func (archive *Archive) addFile(source string, dest string) error {
 		return err
 	}
 
-	// Symlink: write symlink to archive then set 'file' to link target
-	if sourceStat.Mode()&os.ModeSymlink != 0 {
-		// log.Printf("File %q is a symlink", file)
-		target, err := os.Readlink(source)
-		if err != nil {
-			log.Print("addFile: failed to get symlink target: ", source)
-			return err
-		}
-
-		destFilename := strings.TrimPrefix(dest, "/")
-
-		archive.items.add(archiveItem{
-			sourcePath: source,
-			header: &cpio.Header{
-				Name:     destFilename,
-				Linkname: target,
-				Mode:     0644 | cpio.ModeSymlink,
-				Size:     int64(len(target)),
-				// Checksum: 1,
-			},
-		})
-
-		if filepath.Dir(target) == "." {
-			target = filepath.Join(filepath.Dir(source), target)
-		}
-		// make sure target is an absolute path
-		if !filepath.IsAbs(target) {
-			target, err = osutil.RelativeSymlinkTargetToDir(target, filepath.Dir(source))
-			if err != nil {
-				return err
-			}
-		}
-		err = archive.addFile(target, target)
-		return err
-	}
-
 	destFilename := strings.TrimPrefix(dest, "/")
 
 	archive.items.add(archiveItem{
@@ -404,6 +416,12 @@ func (archive *Archive) writeCompressed(path string, mode os.FileMode) (err erro
 }
 
 func (archive *Archive) writeCpio() error {
+	// Just in case
+	if osutil.HasMergedUsr() {
+		archive.addSymlink("/bin", "/bin")
+		archive.addSymlink("/sbin", "/sbin")
+		archive.addSymlink("/lib", "/lib")
+	}
 	// having a transient function for actually adding files to the archive
 	// allows the deferred fd.close to run after every copy and prevent having
 	// tons of open file handles until the copying is all done
@@ -418,19 +436,19 @@ func (archive *Archive) writeCpio() error {
 		if header.Mode.IsRegular() {
 			fd, err := os.Open(source)
 			if err != nil {
-				return fmt.Errorf("archive.writeCpio: uname to open file %q, %w", source, err)
+				return fmt.Errorf("archive.writeCpio: Unable to open file %q, %w", source, err)
 			}
 			defer fd.Close()
 			if _, err := io.Copy(archive.cpioWriter, fd); err != nil {
-				return fmt.Errorf("archive.writeCpio: unable to write out archive: %w", err)
+				return fmt.Errorf("archive.writeCpio: Couldn't process %q: %w", source, err)
 			}
 		} else if header.Linkname != "" {
 			// the contents of a symlink is just need the link name
 			if _, err := archive.cpioWriter.Write([]byte(header.Linkname)); err != nil {
-				return fmt.Errorf("archive.writeCpio: unable to write out symlink: %w", err)
+				return fmt.Errorf("archive.writeCpio: unable to write out symlink: %q -> %q: %w", source, header.Linkname, err)
 			}
 		} else {
-			return fmt.Errorf("archive.writeCpio: unknown type for file: %s", source)
+			return fmt.Errorf("archive.writeCpio: unknown type for file: %q: %d", source, header.Mode)
 		}
 	}
 
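The new addSymlink above archives both the link and its target, resolving relative targets against the directory that contains the symlink (via the project's RelativeSymlinkTargetToDir helper for multi-component relative paths). A small self-contained sketch of just that resolution step, with illustrative paths only:

package main

import (
	"fmt"
	"path/filepath"
)

// resolveTarget turns a relative symlink target (as returned by os.Readlink)
// into an absolute path, using the directory that contains the symlink.
func resolveTarget(symlinkPath, target string) string {
	if filepath.IsAbs(target) {
		return filepath.Clean(target)
	}
	return filepath.Clean(filepath.Join(filepath.Dir(symlinkPath), target))
}

func main() {
	// /usr/lib/libfoo.so -> ../../lib/libfoo.so.1 resolves to /lib/libfoo.so.1
	fmt.Println(resolveTarget("/usr/lib/libfoo.so", "../../lib/libfoo.so.1"))
	// absolute targets pass through unchanged
	fmt.Println(resolveTarget("/bin/sh", "/usr/bin/dash"))
}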
@@ -78,12 +78,17 @@ func (b *BootDeploy) Run() error {
 	}
 
 	// boot-deploy -i initramfs -k vmlinuz-postmarketos-rockchip -d /tmp/cpio -o /tmp/foo initramfs-extra
-	cmd := exec.Command("boot-deploy",
+	args := []string{
 		"-i", "initramfs",
 		"-k", kernFilename,
 		"-d", b.inDir,
 		"-o", b.outDir,
-		"initramfs-extra")
+	}
+
+	if b.devinfo.CreateInitfsExtra {
+		args = append(args, "initramfs-extra")
+	}
+	cmd := exec.Command("boot-deploy", args...)
+
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
@@ -95,13 +100,16 @@ func (b *BootDeploy) Run() error {
 }
 
 func getKernelPath(outDir string, zboot bool) ([]string, error) {
-	kernFile := "vmlinuz*"
+	var kernels []string
+
 	if zboot {
-		kernFile = "linux.efi"
+		kernels, _ = filepath.Glob(filepath.Join(outDir, "linux.efi"))
+		if len(kernels) > 0 {
+			return kernels, nil
+		}
+		// else fallback to vmlinuz* below
 	}
 
-	var kernels []string
+	kernFile := "vmlinuz*"
 	kernels, _ = filepath.Glob(filepath.Join(outDir, kernFile))
 	if len(kernels) == 0 {
 		return nil, errors.New("Unable to find any kernels at " + filepath.Join(outDir, kernFile))
@@ -44,7 +44,7 @@ func (h *HookDirs) List() (*filelist.FileList, error) {
 
 	s := bufio.NewScanner(f)
 	for s.Scan() {
-		dir := s.Text()
+		dir := strings.TrimSpace(s.Text())
 		if len(dir) == 0 || strings.HasPrefix(dir, "#") {
 			continue
 		}
@@ -11,6 +11,7 @@ import (
 
 	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/filelist"
 	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
+	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/osutil"
 )
 
 type HookFiles struct {
@@ -58,12 +59,15 @@ func slurpFiles(fd io.Reader) (*filelist.FileList, error) {
 
 	s := bufio.NewScanner(fd)
 	for s.Scan() {
-		line := s.Text()
+		line := strings.TrimSpace(s.Text())
 		if len(line) == 0 || strings.HasPrefix(line, "#") {
 			continue
 		}
 
 		src, dest, has_dest := strings.Cut(line, ":")
+		if osutil.HasMergedUsr() {
+			src = osutil.MergeUsr(src)
+		}
+
 		fFiles, err := misc.GetFiles([]string{src}, true)
 		if err != nil {
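slurpFiles above reads hook-file lists one line at a time: blank lines and #-comments are skipped, trailing whitespace is now trimmed, and each remaining entry is either a source path or a src:dest pair split with strings.Cut. A self-contained sketch of that line format in isolation (the input below is hypothetical, not from the mkinitfs tree):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	input := `# files-extra example
/usr/bin/fsck.ext4
/etc/myconfig:/etc/renamed-config
`
	s := bufio.NewScanner(strings.NewReader(input))
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue // skip blanks and comments, as slurpFiles does
		}
		src, dest, hasDest := strings.Cut(line, ":")
		if !hasDest {
			dest = src // no explicit destination: keep the source path
		}
		fmt.Printf("copy %s -> %s\n", src, dest)
	}
}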
@@ -33,8 +33,14 @@ func (m *Modules) List() (*filelist.FileList, error) {
 	}
 
 	files := filelist.NewFileList()
+	libDir := "/usr/lib/modules"
+	if exists, err := misc.Exists(libDir); !exists {
+		libDir = "/lib/modules"
+	} else if err != nil {
+		return nil, fmt.Errorf("received unexpected error when getting status for %q: %w", libDir, err)
+	}
+
-	modDir := filepath.Join("/lib/modules", kernVer)
+	modDir := filepath.Join(libDir, kernVer)
 	if exists, err := misc.Exists(modDir); !exists {
 		// dir /lib/modules/<kernel> if kernel built without module support, so just print a message
 		log.Printf("-- kernel module directory not found: %q, not including modules", modDir)
@@ -77,7 +83,7 @@ func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
 	files := filelist.NewFileList()
 	s := bufio.NewScanner(fd)
 	for s.Scan() {
-		line := s.Text()
+		line := strings.TrimSpace(s.Text())
 		if len(line) == 0 || strings.HasPrefix(line, "#") {
 			continue
 		}
@@ -97,8 +103,8 @@ func slurpModules(fd io.Reader, modDir string) (*filelist.FileList, error) {
 			}
 		} else if dir == "" {
 			// item is a module name
-			if modFilelist, err := getModule(s.Text(), modDir); err != nil {
-				return nil, fmt.Errorf("unable to get module file %q: %w", s.Text(), err)
+			if modFilelist, err := getModule(line, modDir); err != nil {
+				return nil, fmt.Errorf("unable to get module file %q: %w", line, err)
 			} else {
 				for _, file := range modFilelist {
 					files.Add(file, file)
@@ -118,7 +124,9 @@ func getModulesInDir(modPath string) (files []string, err error) {
 			// Unable to walk path
 			return err
 		}
-		if filepath.Ext(path) != ".ko" && filepath.Ext(path) != ".xz" {
+		// this assumes module names are in the format <name>.ko[.format],
+		// where ".format" (e.g. ".gz") is optional.
+		if !strings.Contains(".ko", path) {
 			return nil
 		}
 		files = append(files, path)
@@ -180,7 +188,7 @@ func getModuleDeps(modName string, modulesDep io.Reader) ([]string, error) {
 
 	s := bufio.NewScanner(modulesDep)
 	for s.Scan() {
-		line := s.Text()
+		line := strings.TrimSpace(s.Text())
 		if len(line) == 0 || strings.HasPrefix(line, "#") {
 			continue
 		}
@@ -18,6 +18,7 @@ func TestStripExts(t *testing.T) {
 		{"another_file", "another_file"},
 		{"a.b.c.d.e.f.g.h.i", "a"},
 		{"virtio_blk.ko", "virtio_blk"},
+		{"virtio_blk.ko ", "virtio_blk"},
 	}
 	for _, table := range tables {
 		out := stripExts(table.in)
@@ -3,6 +3,7 @@ package misc
 import (
 	"debug/elf"
 	"fmt"
+	"io/fs"
 	"os"
 	"path/filepath"
 
@@ -39,12 +40,42 @@ func getFile(file string, required bool) (files []string, err error) {
 		return RemoveDuplicates(files), nil
 	}
 
+	// If the file is a symlink we need to do this to prevent an infinite recursion
+	// loop:
+	// Symlinks need special handling to prevent infinite recursion:
+	// 1) add the symlink to the list of files
+	// 2) set file to dereferenced target
+	// 4) continue this function to either walk it if the target is a dir or add the
+	// target to the list of files
+	if s, err := os.Lstat(file); err != nil {
+		return files, err
+	} else if s.Mode()&fs.ModeSymlink != 0 {
+		files = append(files, file)
+		if target, err := filepath.EvalSymlinks(file); err != nil {
+			return files, err
+		} else {
+			file = target
+		}
+	}
+
 	fileInfo, err := os.Stat(file)
 	if err != nil {
-		if required {
-			return files, fmt.Errorf("getFile: failed to stat file %q: %w", file, err)
+		// Check if there is a Zstd-compressed version of the file
+		fileZstd := file + ".zst" // .zst is the extension used by linux-firmware
+		fileInfoZstd, errZstd := os.Stat(fileZstd)
+
+		if errZstd == nil {
+			file = fileZstd
+			fileInfo = fileInfoZstd
+			// Unset nil so we don't retain the error from the os.Stat call for the uncompressed version.
+			err = nil
+		} else {
+			if required {
+				return files, fmt.Errorf("getFile: failed to stat file %q: %w (also tried %q: %w)", file, err, fileZstd, errZstd)
+			}
+
+			return files, nil
 		}
-		return files, nil
 	}
 
 	if fileInfo.IsDir() {
@@ -108,6 +139,7 @@ func getDeps(file string, parents map[string]struct{}) (files []string, err erro
 		"/usr/lib",
 		"/lib",
 		"/usr/lib/expect*",
+		"/usr/lib/systemd",
 	}
 
 	for _, lib := range libs {
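The getFile change above adds a fallback: when a requested file cannot be stat'd, the same path with a .zst suffix is tried before giving up, since linux-firmware ships zstd-compressed firmware. A tiny sketch of that stat-then-fallback step on its own (illustrative path, not the project's code):

package main

import (
	"fmt"
	"os"
)

// statWithZstFallback stats path and, if it does not exist, tries path + ".zst".
func statWithZstFallback(path string) (string, os.FileInfo, error) {
	if info, err := os.Stat(path); err == nil {
		return path, info, nil
	}
	zst := path + ".zst" // extension used by linux-firmware
	info, err := os.Stat(zst)
	if err != nil {
		return "", nil, fmt.Errorf("neither %q nor %q could be found: %w", path, zst, err)
	}
	return zst, info, nil
}

func main() {
	name, _, err := statWithZstFallback("/lib/firmware/example.bin")
	fmt.Println(name, err)
}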
internal/misc/getfiles_test.go (new file, 149 lines)

// Copyright 2025 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later

package misc

import (
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"testing"
	"time"
)

func TestGetFile(t *testing.T) {
	subtests := []struct {
		name     string
		setup    func(tmpDir string) (inputPath string, expectedFiles []string, err error)
		required bool
	}{
		{
			name: "symlink to directory - no infinite recursion",
			setup: func(tmpDir string) (string, []string, error) {
				// Create target directory with files
				targetDir := filepath.Join(tmpDir, "target")
				if err := os.MkdirAll(targetDir, 0755); err != nil {
					return "", nil, err
				}

				testFile1 := filepath.Join(targetDir, "file1.txt")
				testFile2 := filepath.Join(targetDir, "file2.txt")
				if err := os.WriteFile(testFile1, []byte("content1"), 0644); err != nil {
					return "", nil, err
				}
				if err := os.WriteFile(testFile2, []byte("content2"), 0644); err != nil {
					return "", nil, err
				}

				// Create symlink pointing to target directory
				symlinkPath := filepath.Join(tmpDir, "symlink")
				if err := os.Symlink(targetDir, symlinkPath); err != nil {
					return "", nil, err
				}

				expected := []string{symlinkPath, testFile1, testFile2}
				return symlinkPath, expected, nil
			},
			required: true,
		},
		{
			name: "symlink to file - returns both symlink and target",
			setup: func(tmpDir string) (string, []string, error) {
				// Create target file
				targetFile := filepath.Join(tmpDir, "target.txt")
				if err := os.WriteFile(targetFile, []byte("content"), 0644); err != nil {
					return "", nil, err
				}

				// Create symlink pointing to target file
				symlinkPath := filepath.Join(tmpDir, "symlink.txt")
				if err := os.Symlink(targetFile, symlinkPath); err != nil {
					return "", nil, err
				}

				expected := []string{symlinkPath, targetFile}
				return symlinkPath, expected, nil
			},
			required: true,
		},
		{
			name: "regular file",
			setup: func(tmpDir string) (string, []string, error) {
				regularFile := filepath.Join(tmpDir, "regular.txt")
				if err := os.WriteFile(regularFile, []byte("content"), 0644); err != nil {
					return "", nil, err
				}

				expected := []string{regularFile}
				return regularFile, expected, nil
			},
			required: true,
		},
		{
			name: "regular directory",
			setup: func(tmpDir string) (string, []string, error) {
				// Create directory with files
				dirPath := filepath.Join(tmpDir, "testdir")
				if err := os.MkdirAll(dirPath, 0755); err != nil {
					return "", nil, err
				}

				file1 := filepath.Join(dirPath, "file1.txt")
				file2 := filepath.Join(dirPath, "subdir", "file2.txt")

				if err := os.WriteFile(file1, []byte("content1"), 0644); err != nil {
					return "", nil, err
				}
				if err := os.MkdirAll(filepath.Dir(file2), 0755); err != nil {
					return "", nil, err
				}
				if err := os.WriteFile(file2, []byte("content2"), 0644); err != nil {
					return "", nil, err
				}

				expected := []string{file1, file2}
				return dirPath, expected, nil
			},
			required: true,
		},
	}

	for _, st := range subtests {
		t.Run(st.name, func(t *testing.T) {
			tmpDir := t.TempDir()

			inputPath, expectedFiles, err := st.setup(tmpDir)
			if err != nil {
				t.Fatalf("setup failed: %v", err)
			}

			// Add timeout protection for infinite recursion test
			done := make(chan struct{})
			var files []string
			var getFileErr error

			go func() {
				defer close(done)
				files, getFileErr = getFile(inputPath, st.required)
			}()

			select {
			case <-done:
				if getFileErr != nil {
					t.Fatalf("getFile failed: %v", getFileErr)
				}
			case <-time.After(5 * time.Second):
				t.Fatal("getFile appears to be in infinite recursion (timeout)")
			}

			// Sort for comparison
			sort.Strings(expectedFiles)
			sort.Strings(files)

			if !reflect.DeepEqual(expectedFiles, files) {
				t.Fatalf("expected: %q, got: %q", expectedFiles, files)
			}
		})
	}
}
@@ -10,6 +10,39 @@ import (
 	"golang.org/x/sys/unix"
 )
 
+// Try to guess whether the system has merged dirs under /usr
+func HasMergedUsr() bool {
+	for _, dir := range []string{"/bin", "/lib"} {
+		stat, err := os.Lstat(dir)
+		if err != nil {
+			// TODO: probably because the dir doesn't exist... so
+			// should we assume that it's because the system has some weird
+			// implementation of "merge /usr"?
+			return true
+		} else if stat.Mode()&os.ModeSymlink == 0 {
+			// Not a symlink, so must not be merged /usr
+			return false
+		}
+	}
+	return true
+}
+
+// Converts given path to one supported by a merged /usr config.
+// E.g., /bin/foo becomes /usr/bin/foo, /lib/bar becomes /usr/lib/bar
+// See: https://www.freedesktop.org/wiki/Software/systemd/TheCaseForTheUsrMerge
+func MergeUsr(file string) string {
+
+	// Prepend /usr to supported paths
+	for _, prefix := range []string{"/bin", "/sbin", "/lib", "/lib64"} {
+		if strings.HasPrefix(file, prefix) {
+			file = filepath.Join("/usr", file)
+			break
+		}
+	}
+
+	return file
+}
+
 // Converts a relative symlink target path (e.g. ../../lib/foo.so), that is
 // absolute path
 func RelativeSymlinkTargetToDir(symPath string, dir string) (string, error) {
internal/osutil/osutil_test.go (new file, 49 lines)

// Copyright 2024 Clayton Craft <clayton@craftyguy.net>
// SPDX-License-Identifier: GPL-3.0-or-later

package osutil

import (
	"testing"
)

func TestMergeUsr(t *testing.T) {
	subtests := []struct {
		in       string
		expected string
	}{
		{
			in:       "/bin/foo",
			expected: "/usr/bin/foo",
		},
		{
			in:       "/sbin/foo",
			expected: "/usr/sbin/foo",
		},
		{
			in:       "/usr/sbin/foo",
			expected: "/usr/sbin/foo",
		},
		{
			in:       "/usr/bin/foo",
			expected: "/usr/bin/foo",
		},
		{
			in:       "/lib/foo.so",
			expected: "/usr/lib/foo.so",
		},
		{
			in:       "/lib64/foo.so",
			expected: "/usr/lib64/foo.so",
		},
	}

	for _, st := range subtests {
		t.Run(st.in, func(t *testing.T) {
			out := MergeUsr(st.in)
			if out != st.expected {
				t.Fatalf("expected: %q, got: %q\n", st.expected, out)
			}
		})
	}
}
@@ -4,14 +4,14 @@
 package deviceinfo
 
 import (
-	"bufio"
+	"context"
 	"fmt"
-	"io"
-	"log"
-	"os"
 	"reflect"
+	"strconv"
 	"strings"
+	"time"
+
+	"github.com/mvdan/sh/shell"
 	"gitlab.com/postmarketOS/postmarketos-mkinitfs/internal/misc"
 )
 
@@ -20,6 +20,8 @@ type DeviceInfo struct {
 	InitfsExtraCompression string
 	UbootBoardname         string
 	GenerateSystemdBoot    string
+	FormatVersion          string
+	CreateInitfsExtra      bool
 }
 
 // Reads the relevant entries from "file" into DeviceInfo struct
@@ -32,13 +34,7 @@ func (d *DeviceInfo) ReadDeviceinfo(file string) error {
 		return fmt.Errorf("unexpected error getting status for %q: %s", file, err)
 	}
 
-	fd, err := os.Open(file)
-	if err != nil {
-		return err
-	}
-	defer fd.Close()
-
-	if err := d.unmarshal(fd); err != nil {
+	if err := d.unmarshal(file); err != nil {
 		return err
 	}
 
@@ -46,53 +42,44 @@ func (d *DeviceInfo) ReadDeviceinfo(file string) error {
 }
 
 // Unmarshals a deviceinfo into a DeviceInfo struct
-func (d *DeviceInfo) unmarshal(r io.Reader) error {
-	s := bufio.NewScanner(r)
-	for s.Scan() {
-		line := s.Text()
-		if strings.HasPrefix(line, "#") {
-			continue
-		}
-
-		// line isn't setting anything, so just ignore it
-		if !strings.Contains(line, "=") {
-			continue
-		}
-
-		// sometimes line has a comment at the end after setting an option
-		line = strings.SplitN(line, "#", 2)[0]
-		line = strings.TrimSpace(line)
-
-		// must support having '=' in the value (e.g. kernel cmdline)
-		parts := strings.SplitN(line, "=", 2)
-		if len(parts) != 2 {
-			return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
-		}
-
-		name, val := parts[0], parts[1]
-		val = strings.ReplaceAll(val, "\"", "")
-
-		if name == "deviceinfo_format_version" && val != "0" {
-			return fmt.Errorf("deviceinfo format version %q is not supported", val)
-		}
-
-		fieldName := nameToField(name)
-
-		if fieldName == "" {
-			return fmt.Errorf("error parsing deviceinfo line, invalid format: %s", line)
-		}
-
+func (d *DeviceInfo) unmarshal(file string) error {
+	ctx, cancelCtx := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
+	defer cancelCtx()
+	vars, err := shell.SourceFile(ctx, file)
+	if err != nil {
+		return fmt.Errorf("parsing deviceinfo %q failed: %w", file, err)
+	}
+
+	for k, v := range vars {
+		fieldName := nameToField(k)
 		field := reflect.ValueOf(d).Elem().FieldByName(fieldName)
 		if !field.IsValid() {
 			// an option that meets the deviceinfo "specification", but isn't
 			// one we care about in this module
 			continue
 		}
-		field.SetString(val)
+		switch field.Interface().(type) {
+		case string:
+			field.SetString(v.String())
+		case bool:
+			if v, err := strconv.ParseBool(v.String()); err != nil {
+				return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'bool'", file, k)
+			} else {
+				field.SetBool(v)
+			}
+		case int:
+			if v, err := strconv.ParseInt(v.String(), 10, 32); err != nil {
+				return fmt.Errorf("deviceinfo %q has unsupported type for field %q, expected 'int'", file, k)
+			} else {
+				field.SetInt(v)
+			}
+		default:
+			return fmt.Errorf("deviceinfo %q has unsupported type for field %q", file, k)
+		}
 	}
-	if err := s.Err(); err != nil {
-		log.Print("unable to parse deviceinfo: ", err)
-		return err
+	if d.FormatVersion != "0" {
+		return fmt.Errorf("deviceinfo %q has an unsupported format version %q", file, d.FormatVersion)
 	}
 
 	return nil
@@ -116,3 +103,25 @@ func nameToField(name string) string {
 	return field
 }
+
+func (d DeviceInfo) String() string {
+	return fmt.Sprintf(`{
+	%s: %v
+	%s: %v
+	%s: %v
+	%s: %v
+	%s: %v
+	%s: %v
+	%s: %v
+	%s: %v
+	}`,
+		"deviceinfo_format_version", d.FormatVersion,
+		"deviceinfo_", d.FormatVersion,
+		"deviceinfo_initfs_compression", d.InitfsCompression,
+		"deviceinfo_initfs_extra_compression", d.InitfsCompression,
+		"deviceinfo_ubootBoardname", d.UbootBoardname,
+		"deviceinfo_generateSystemdBoot", d.GenerateSystemdBoot,
+		"deviceinfo_formatVersion", d.FormatVersion,
+		"deviceinfo_createInitfsExtra", d.CreateInitfsExtra,
+	)
+}
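The rewritten unmarshal above stops hand-parsing deviceinfo and instead sources it as shell through the github.com/mvdan/sh shell package, then maps the resulting variables onto struct fields by reflection. A minimal sketch of just the sourcing step, mirroring the call shape used in the hunk above (the deviceinfo path comes from the man page earlier in this diff; treat the exact shell API as an assumption from that hunk):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/mvdan/sh/shell"
)

func main() {
	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
	defer cancel()

	// Evaluate the file as shell and collect the variables it sets.
	vars, err := shell.SourceFile(ctx, "/usr/share/deviceinfo/deviceinfo")
	if err != nil {
		fmt.Println("parsing deviceinfo failed:", err)
		return
	}
	for name, value := range vars {
		fmt.Printf("%s=%q\n", name, value.String())
	}
}

Sourcing the file this way makes comments, quoting, and in-line comments after assignments behave exactly as they would in a shell, which is what the new test resource below exercises.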
@@ -4,8 +4,6 @@
 package deviceinfo
 
 import (
-	"fmt"
-	"reflect"
 	"strings"
 	"testing"
 )
@@ -44,6 +42,7 @@ func TestNameToField(t *testing.T) {
 		{"modules_initfs", "ModulesInitfs"},
 		{"deviceinfo_initfs_compression___", "InitfsCompression"},
 		{"deviceinfo_initfs_extra_compression", "InitfsExtraCompression"},
+		{"deviceinfo_create_initfs_extra", "CreateInitfsExtra"},
 	}
 
 	for _, table := range tables {
@@ -59,37 +58,25 @@ func TestUnmarshal(t *testing.T) {
 	tables := []struct {
 		// field is just used for reflection within the test, so it must be a
 		// valid DeviceInfo field
-		field    string
-		in       string
-		expected string
+		file     string
+		expected DeviceInfo
 	}{
-		{"InitfsCompression", "deviceinfo_initfs_compression=\"gzip:-9\"\n", "gzip:-9"},
-		// line with multiple '='
-		{"InitfsCompression", "deviceinfo_initfs_compression=zstd:--foo=1 -T0 --bar=bazz", "zstd:--foo=1 -T0 --bar=bazz"},
-		// empty option
-		{"InitfsCompression", "deviceinfo_initfs_compression=\"\"\n", ""},
-		// line with comment at the end
-		{"", "# this is a comment!\n", ""},
-		// empty lines are fine
-		{"", "", ""},
-		// line with whitepace characters only
-		{"", " \t \n\r", ""},
+		{"./test_resources/deviceinfo-unmarshal-1", DeviceInfo{
+			FormatVersion:          "0",
+			UbootBoardname:         "foobar-bazz",
+			InitfsCompression:      "zstd:--foo=1 -T0 --bar=bazz",
+			InitfsExtraCompression: "",
+			CreateInitfsExtra:      true,
+		},
+		},
 	}
 	var d DeviceInfo
 	for _, table := range tables {
-		testName := fmt.Sprintf("unmarshal::'%s':", strings.ReplaceAll(table.in, "\n", "\\n"))
-		if err := d.unmarshal(strings.NewReader(table.in)); err != nil {
-			t.Errorf("%s received an unexpected err: ", err)
+		if err := d.unmarshal(table.file); err != nil {
+			t.Error(err)
 		}
-
-		// Check against expected value
-		field := reflect.ValueOf(&d).Elem().FieldByName(table.field)
-		out := ""
-		if table.field != "" {
-			out = field.String()
-		}
-		if out != table.expected {
-			t.Errorf("%s expected: %q, got: %q", testName, table.expected, out)
+		if d != table.expected {
+			t.Errorf("expected: %s, got: %s", table.expected, d)
 		}
 	}
 }
@@ -1,2 +1,3 @@
+deviceinfo_format_version="0"
 deviceinfo_initfs_compression="gz -9"
 deviceinfo_mesa_driver="panfrost"
@@ -1 +1,2 @@
+deviceinfo_format_version="0"
 deviceinfo_mesa_driver="msm"
pkgs/deviceinfo/test_resources/deviceinfo-unmarshal-1 (new file, 7 lines)

deviceinfo_format_version="0"
deviceinfo_uboot_boardname="foobar-bazz"
# line with multiple =
deviceinfo_initfs_compression="zstd:--foo=1 -T0 --bar=bazz"
# empty option
deviceinfo_initfs_extra_compression=""
deviceinfo_create_initfs_extra="true" # in-line comment that should be ignored