mirror of
https://github.com/pterodactyl/wings.git
synced 2025-12-10 00:32:17 -06:00
Compare commits
145 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ae6c62905b | ||
|
|
e4f139a656 | ||
|
|
b75ab17fe2 | ||
|
|
149b796849 | ||
|
|
503b87c240 | ||
|
|
60cc3ad30c | ||
|
|
daefc0de79 | ||
|
|
d9b1675a59 | ||
|
|
ee922588e0 | ||
|
|
7bb7696307 | ||
|
|
695b2eaf86 | ||
|
|
e76dc74ddb | ||
|
|
0d9b0863f9 | ||
|
|
326db212e9 | ||
|
|
798cbe567d | ||
|
|
8b65328390 | ||
|
|
746c7afbfb | ||
|
|
c93a1da141 | ||
|
|
06a3167754 | ||
|
|
ccae513224 | ||
|
|
56bb2ce298 | ||
|
|
d1f2c4e641 | ||
|
|
b6f07bc959 | ||
|
|
b7d442ecc0 | ||
|
|
a94d4ec5e5 | ||
|
|
35d2e0f040 | ||
|
|
d2b4e983f5 | ||
|
|
25aaf127f6 | ||
|
|
cb391bcbb8 | ||
|
|
2a94eb0af5 | ||
|
|
ae1192f760 | ||
|
|
3855eecb60 | ||
|
|
3ea9e6991a | ||
|
|
9533f4f04a | ||
|
|
0bc3df9306 | ||
|
|
a4e16748b0 | ||
|
|
3d90ac5909 | ||
|
|
74fd19c86f | ||
|
|
cecdc8c612 | ||
|
|
489af48eb9 | ||
|
|
104fe1aace | ||
|
|
81e8c9a8c4 | ||
|
|
9ff918bf0b | ||
|
|
d5097e5f59 | ||
|
|
e311206d6b | ||
|
|
f18726c874 | ||
|
|
49b04fe9bb | ||
|
|
9535aae52e | ||
|
|
4b4e8f8fa0 | ||
|
|
91e016249c | ||
|
|
59905a6b69 | ||
|
|
d49607de0e | ||
|
|
91ed7f25e1 | ||
|
|
a4ff433d95 | ||
|
|
7f63162d1d | ||
|
|
d0e7332881 | ||
|
|
5e9a1c7139 | ||
|
|
407b783aa5 | ||
|
|
25966e7838 | ||
|
|
77153ffbb1 | ||
|
|
c6c235dbc0 | ||
|
|
a55277da47 | ||
|
|
76a9f6dc5a | ||
|
|
0e96ef3edf | ||
|
|
1dee350268 | ||
|
|
ec14db6142 | ||
|
|
c83875ffc0 | ||
|
|
b4dc19fca1 | ||
|
|
a2be26574c | ||
|
|
8045318e44 | ||
|
|
024fe548ed | ||
|
|
82ae64b4c6 | ||
|
|
f221cde754 | ||
|
|
63c4c1ce57 | ||
|
|
b4a9a1c5de | ||
|
|
7daaaaac18 | ||
|
|
708cdd0ba8 | ||
|
|
adb2b26ae0 | ||
|
|
6c9d3670c8 | ||
|
|
18306badaf | ||
|
|
34bd2b54e5 | ||
|
|
d023c97334 | ||
|
|
8744e64f1d | ||
|
|
3b88bbc7aa | ||
|
|
d739948989 | ||
|
|
ac260bd5ee | ||
|
|
2f4a0d7262 | ||
|
|
1d8b383682 | ||
|
|
934bf2493d | ||
|
|
29e4425e21 | ||
|
|
5a15612754 | ||
|
|
ad1ae862a9 | ||
|
|
3114a3b82e | ||
|
|
500f217514 | ||
|
|
9ffbcdcdb1 | ||
|
|
9b341db2db | ||
|
|
71c5338549 | ||
|
|
326f115f5b | ||
|
|
06614de99d | ||
|
|
2b0e35360b | ||
|
|
202f2229a9 | ||
|
|
baf1f0b5cd | ||
|
|
ec54371b86 | ||
|
|
1d5090957b | ||
|
|
5415f8ae07 | ||
|
|
617fbcbf27 | ||
|
|
c152e36101 | ||
|
|
5b0422d756 | ||
|
|
f1c5bbd42d | ||
|
|
1c5ddcd20c | ||
|
|
a877305202 | ||
|
|
1f77d2256b | ||
|
|
ac9bd1d95e | ||
|
|
979df34392 | ||
|
|
8f129931d5 | ||
|
|
2931430eb8 | ||
|
|
99b9924a4a | ||
|
|
d649bb1116 | ||
|
|
1477b7034b | ||
|
|
d1c0ca5260 | ||
|
|
27f3e76c77 | ||
|
|
eadbe920fe | ||
|
|
3e804b81fe | ||
|
|
f68965e7c9 | ||
|
|
accc833e87 | ||
|
|
d4bfdd4548 | ||
|
|
2641080007 | ||
|
|
10c58d3dc0 | ||
|
|
9496b1f7e5 | ||
|
|
2f1b67ed35 | ||
|
|
579278b4de | ||
|
|
d30ab7b9bd | ||
|
|
d1fd0465e4 | ||
|
|
79eb8e1365 | ||
|
|
2cb201d202 | ||
|
|
fc1ffc8cd3 | ||
|
|
48c55af373 | ||
|
|
7a59d0929c | ||
|
|
9b5eaf44df | ||
|
|
438e5fdbe9 | ||
|
|
a866493d0a | ||
|
|
c9d92f7bac | ||
|
|
aa8ffdfcf7 | ||
|
|
8d7e23f542 | ||
|
|
bd26d6eefd |
21
.editorconfig
Normal file
21
.editorconfig
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
root = true
|
||||||
|
|
||||||
|
[*]
|
||||||
|
indent_style = tab
|
||||||
|
indent_size = 4
|
||||||
|
tab_width = 4
|
||||||
|
end_of_line = lf
|
||||||
|
charset = utf-8
|
||||||
|
trim_trailing_whitespace = true
|
||||||
|
insert_final_newline = true
|
||||||
|
|
||||||
|
[*.go]
|
||||||
|
max_line_length = 100
|
||||||
|
|
||||||
|
[*.md]
|
||||||
|
trim_trailing_whitespace = false
|
||||||
|
|
||||||
|
[*.{md,nix,yaml}]
|
||||||
|
indent_style = space
|
||||||
|
indent_size = 2
|
||||||
|
tab_width = 2
|
||||||
8
.envrc
Normal file
8
.envrc
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
#!/usr/bin/env sh
|
||||||
|
|
||||||
|
# Load the flake's `devShells.${currentSystem}.default`.
|
||||||
|
if ! use flake .; then
|
||||||
|
echo 'The development shell was unable to be built.' >&2
|
||||||
|
echo 'The development environment was not loaded.' >&2
|
||||||
|
echo 'Please make the necessary changes in flake.nix to fix any issues and hit enter to try again.' >&2
|
||||||
|
fi
|
||||||
10
.github/dependabot.yaml
vendored
Normal file
10
.github/dependabot.yaml
vendored
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
version: 2
|
||||||
|
updates:
|
||||||
|
- package-ecosystem: github-actions
|
||||||
|
directory: /
|
||||||
|
schedule:
|
||||||
|
interval: monthly
|
||||||
|
- package-ecosystem: gomod
|
||||||
|
directory: /
|
||||||
|
schedule:
|
||||||
|
interval: weekly
|
||||||
28
.github/workflows/codeql.yaml
vendored
28
.github/workflows/codeql.yaml
vendored
@ -13,30 +13,26 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
analyze:
|
analyze:
|
||||||
name: Analyze
|
name: Analyze
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-24.04
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- language: go
|
||||||
|
build-mode: autobuild
|
||||||
permissions:
|
permissions:
|
||||||
actions: read
|
actions: read
|
||||||
contents: read
|
contents: read
|
||||||
security-events: write
|
security-events: write
|
||||||
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
language:
|
|
||||||
- go
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Code Checkout
|
- name: Code checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||||
|
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v2
|
uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5
|
||||||
with:
|
with:
|
||||||
languages: ${{ matrix.language }}
|
languages: ${{ matrix.language }}
|
||||||
|
build-mode: ${{ matrix.build-mode }}
|
||||||
- name: Autobuild
|
|
||||||
uses: github/codeql-action/autobuild@v2
|
|
||||||
|
|
||||||
- name: Perform CodeQL Analysis
|
- name: Perform CodeQL Analysis
|
||||||
uses: github/codeql-action/analyze@v2
|
uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5
|
||||||
|
|||||||
25
.github/workflows/docker.yaml
vendored
25
.github/workflows/docker.yaml
vendored
@ -11,18 +11,21 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
build-and-push:
|
build-and-push:
|
||||||
name: Build and Push
|
name: Build and Push
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-24.04
|
||||||
# Always run against a tag, even if the commit into the tag has [docker skip] within the commit message.
|
# Always run against a tag, even if the commit into the tag has [docker skip] within the commit message.
|
||||||
if: "!contains(github.ref, 'develop') || (!contains(github.event.head_commit.message, 'skip docker') && !contains(github.event.head_commit.message, 'docker skip'))"
|
if: "!contains(github.ref, 'develop') || (!contains(github.event.head_commit.message, 'skip docker') && !contains(github.event.head_commit.message, 'docker skip'))"
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
steps:
|
steps:
|
||||||
- name: Code checkout
|
- name: Code checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||||
|
|
||||||
- name: Docker metadata
|
- name: Docker metadata
|
||||||
id: docker_meta
|
id: docker_meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v5.8.0
|
||||||
with:
|
with:
|
||||||
images: ghcr.io/pterodactyl/wings
|
images: ghcr.io/${{ github.repository }}
|
||||||
flavor: |
|
flavor: |
|
||||||
latest=false
|
latest=false
|
||||||
tags: |
|
tags: |
|
||||||
@ -31,17 +34,17 @@ jobs:
|
|||||||
type=ref,event=branch
|
type=ref,event=branch
|
||||||
|
|
||||||
- name: Setup QEMU
|
- name: Setup QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||||
|
|
||||||
- name: Setup Docker buildx
|
- name: Setup Docker buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.actor }}
|
||||||
password: ${{ secrets.REGISTRY_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Get Build Information
|
- name: Get Build Information
|
||||||
id: build_info
|
id: build_info
|
||||||
@ -50,7 +53,7 @@ jobs:
|
|||||||
echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Build and Push (tag)
|
- name: Build and Push (tag)
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||||
if: "github.event_name == 'release' && github.event.action == 'published'"
|
if: "github.event_name == 'release' && github.event.action == 'published'"
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
@ -63,7 +66,7 @@ jobs:
|
|||||||
tags: ${{ steps.docker_meta.outputs.tags }}
|
tags: ${{ steps.docker_meta.outputs.tags }}
|
||||||
|
|
||||||
- name: Build and Push (develop)
|
- name: Build and Push (develop)
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||||
if: "github.event_name == 'push' && contains(github.ref, 'develop')"
|
if: "github.event_name == 'push' && contains(github.ref, 'develop')"
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
|
|||||||
27
.github/workflows/push.yaml
vendored
27
.github/workflows/push.yaml
vendored
@ -15,20 +15,21 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-20.04]
|
os: [ubuntu-24.04]
|
||||||
go: ["1.19.9", "1.20.4"]
|
go: ["1.23.7", "1.24.1"]
|
||||||
goos: [linux]
|
goos: [linux]
|
||||||
goarch: [amd64, arm64]
|
goarch: [amd64, arm64]
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
|
- name: Code checkout
|
||||||
|
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v4
|
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go }}
|
go-version: ${{ matrix.go }}
|
||||||
|
|
||||||
- name: Code checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: go mod download
|
- name: go mod download
|
||||||
env:
|
env:
|
||||||
CGO_ENABLED: 0
|
CGO_ENABLED: 0
|
||||||
@ -42,8 +43,8 @@ jobs:
|
|||||||
CGO_ENABLED: 0
|
CGO_ENABLED: 0
|
||||||
SRC_PATH: github.com/pterodactyl/wings
|
SRC_PATH: github.com/pterodactyl/wings
|
||||||
run: |
|
run: |
|
||||||
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o dist/wings ${SRC_PATH}
|
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GITHUB_SHA:0:7}" -o dist/wings ${SRC_PATH}
|
||||||
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o dist/wings_debug ${SRC_PATH}
|
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GITHUB_SHA:0:7}" -o dist/wings_debug ${SRC_PATH}
|
||||||
chmod 755 dist/*
|
chmod 755 dist/*
|
||||||
|
|
||||||
- name: go test
|
- name: go test
|
||||||
@ -61,15 +62,15 @@ jobs:
|
|||||||
go test -race $(go list ./...)
|
go test -race $(go list ./...)
|
||||||
|
|
||||||
- name: Upload Release Artifact
|
- name: Upload Release Artifact
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||||
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
|
if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') && matrix.go == '1.23.7' }}
|
||||||
with:
|
with:
|
||||||
name: wings_linux_${{ matrix.goarch }}
|
name: wings_linux_${{ matrix.goarch }}
|
||||||
path: dist/wings
|
path: dist/wings
|
||||||
|
|
||||||
- name: Upload Debug Artifact
|
- name: Upload Debug Artifact
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||||
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
|
if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') && matrix.go == '1.23.7' }}
|
||||||
with:
|
with:
|
||||||
name: wings_linux_${{ matrix.goarch }}_debug
|
name: wings_linux_${{ matrix.goarch }}_debug
|
||||||
path: dist/wings_debug
|
path: dist/wings_debug
|
||||||
|
|||||||
48
.github/workflows/release.yaml
vendored
48
.github/workflows/release.yaml
vendored
@ -8,16 +8,18 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
release:
|
release:
|
||||||
name: Release
|
name: Release
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-24.04
|
||||||
|
permissions:
|
||||||
|
contents: write # write is required to create releases and push.
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Code Checkout
|
- name: Code checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v4
|
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
|
||||||
with:
|
with:
|
||||||
go-version: "1.19.9"
|
go-version: "1.23.7"
|
||||||
|
|
||||||
- name: Build release binaries
|
- name: Build release binaries
|
||||||
env:
|
env:
|
||||||
@ -57,41 +59,13 @@ jobs:
|
|||||||
git push
|
git push
|
||||||
|
|
||||||
- name: Create release
|
- name: Create release
|
||||||
id: create_release
|
uses: softprops/action-gh-release@6da8fa9354ddfdc4aeace5fc48d7f679b5214090 # v2.4.1
|
||||||
uses: softprops/action-gh-release@v1
|
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
draft: true
|
draft: true
|
||||||
prerelease: ${{ contains(github.ref, 'rc') || contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
|
prerelease: ${{ contains(github.ref, 'rc') || contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
|
||||||
body_path: ./RELEASE_CHANGELOG
|
body_path: ./RELEASE_CHANGELOG
|
||||||
|
files: |
|
||||||
- name: Upload amd64 binary
|
dist/*
|
||||||
uses: actions/upload-release-asset@v1
|
checksums.txt
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
|
||||||
asset_path: dist/wings_linux_amd64
|
|
||||||
asset_name: wings_linux_amd64
|
|
||||||
asset_content_type: application/octet-stream
|
|
||||||
|
|
||||||
- name: Upload arm64 binary
|
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
|
||||||
asset_path: dist/wings_linux_arm64
|
|
||||||
asset_name: wings_linux_arm64
|
|
||||||
asset_content_type: application/octet-stream
|
|
||||||
|
|
||||||
- name: Upload checksum
|
|
||||||
uses: actions/upload-release-asset@v1
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
with:
|
|
||||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
|
||||||
asset_path: ./checksums.txt
|
|
||||||
asset_name: checksums.txt
|
|
||||||
asset_content_type: text/plain
|
|
||||||
|
|||||||
52
CHANGELOG.md
52
CHANGELOG.md
@ -1,5 +1,57 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## v1.11.14
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
* Support relative file paths for the Wings config ([#180](https://github.com/pterodactyl/wings/pull/180))
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
* Folders not being sorted before files properly ([#5078](https://github.com/pterodactyl/panel/issues/5078)
|
||||||
|
|
||||||
|
## v1.11.13
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
* Auto-configure not working ([#5087](https://github.com/pterodactyl/panel/issues/5087))
|
||||||
|
* Individual files unable to be decompressed ([#5034](https://github.com/pterodactyl/panel/issues/5034))
|
||||||
|
|
||||||
|
## v1.11.12
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
* Arbitrary File Write/Read ([GHSA-gqmf-jqgv-v8fw](https://github.com/pterodactyl/wings/security/advisories/GHSA-gqmf-jqgv-v8fw))
|
||||||
|
* Server-side Request Forgery (SSRF) during remote file pull ([GHSA-qq22-jj8x-4wwv](https://github.com/pterodactyl/wings/security/advisories/GHSA-qq22-jj8x-4wwv))
|
||||||
|
* Invalid `Content-Type` being used with the `wings diagnostics` command ([#186](https://github.com/pterodactyl/wings/pull/186))
|
||||||
|
|
||||||
|
## v1.11.11
|
||||||
|
### Fixed
|
||||||
|
* Backups missing content when a `.pteroignore` file is used
|
||||||
|
* Archives originating from a subdirectory not containing any files ([#5030](https://github.com/pterodactyl/panel/issues/5030))
|
||||||
|
|
||||||
|
## v1.11.10
|
||||||
|
### Fixed
|
||||||
|
* Archives randomly ignoring files and directories ([#5027](https://github.com/pterodactyl/panel/issues/5027))
|
||||||
|
* Crash when deleting or transferring a server ([#5028](https://github.com/pterodactyl/panel/issues/5028))
|
||||||
|
|
||||||
|
## v1.11.9
|
||||||
|
### Changed
|
||||||
|
* Release binaries are now built with Go 1.21.8
|
||||||
|
* Updated Go dependencies
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
* [CVE-2024-27102](https://www.cve.org/CVERecord?id=CVE-2024-27102)
|
||||||
|
|
||||||
|
## v1.11.8
|
||||||
|
### Changed
|
||||||
|
* Release binaries are now built with Go 1.20.10 (resolves [CVE-2023-44487](https://www.cve.org/CVERecord?id=CVE-2023-44487))
|
||||||
|
* Updated Go dependencies
|
||||||
|
|
||||||
|
## v1.11.7
|
||||||
|
### Changed
|
||||||
|
* Updated Go dependencies (this resolves an issue related to `http: invalid Host header` with Docker)
|
||||||
|
* Wings is now built with go1.19.11
|
||||||
|
|
||||||
## v1.11.6
|
## v1.11.6
|
||||||
### Fixed
|
### Fixed
|
||||||
* CVE-2023-32080
|
* CVE-2023-32080
|
||||||
|
|||||||
11
Dockerfile
11
Dockerfile
@ -1,8 +1,8 @@
|
|||||||
# Stage 1 (Build)
|
# Stage 1 (Build)
|
||||||
FROM golang:1.19-alpine AS builder
|
FROM golang:1.23.7-alpine AS builder
|
||||||
|
|
||||||
ARG VERSION
|
ARG VERSION
|
||||||
RUN apk add --update --no-cache git make
|
RUN apk add --update --no-cache git make mailcap
|
||||||
WORKDIR /app/
|
WORKDIR /app/
|
||||||
COPY go.mod go.sum /app/
|
COPY go.mod go.sum /app/
|
||||||
RUN go mod download
|
RUN go mod download
|
||||||
@ -18,8 +18,11 @@ RUN echo "ID=\"distroless\"" > /etc/os-release
|
|||||||
# Stage 2 (Final)
|
# Stage 2 (Final)
|
||||||
FROM gcr.io/distroless/static:latest
|
FROM gcr.io/distroless/static:latest
|
||||||
COPY --from=builder /etc/os-release /etc/os-release
|
COPY --from=builder /etc/os-release /etc/os-release
|
||||||
|
COPY --from=builder /etc/mime.types /etc/mime.types
|
||||||
|
|
||||||
COPY --from=builder /app/wings /usr/bin/
|
COPY --from=builder /app/wings /usr/bin/
|
||||||
CMD [ "/usr/bin/wings", "--config", "/etc/pterodactyl/config.yml" ]
|
|
||||||
|
|
||||||
EXPOSE 8080
|
ENTRYPOINT ["/usr/bin/wings"]
|
||||||
|
CMD ["--config", "/etc/pterodactyl/config.yml"]
|
||||||
|
|
||||||
|
EXPOSE 8080 2022
|
||||||
|
|||||||
19
README.md
19
README.md
@ -15,18 +15,17 @@ dependencies, and allowing users to authenticate with the same credentials they
|
|||||||
|
|
||||||
## Sponsors
|
## Sponsors
|
||||||
|
|
||||||
I would like to extend my sincere thanks to the following sponsors for helping find Pterodactyl's development.
|
I would like to extend my sincere thanks to the following sponsors for helping fund Pterodactyl's development.
|
||||||
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
|
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
|
||||||
|
|
||||||
| Company | About |
|
| Company | About |
|
||||||
|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|-----------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
| [**WISP**](https://wisp.gg) | Extra features. |
|
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
|
||||||
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
|
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
|
||||||
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
|
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
|
||||||
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
|
| [**HostEZ**](https://hostez.io) | US & EU Rust & Minecraft Hosting. DDoS Protected bare metal, VPS and colocation with low latency, high uptime and maximum availability. EZ! |
|
||||||
| [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
|
| [**Blueprint**](https://blueprint.zip/?utm_source=pterodactyl&utm_medium=sponsor) | Create and install Pterodactyl addons and themes with the growing Blueprint framework - the package-manager for Pterodactyl. Use multiple modifications at once without worrying about conflicts and make use of the large extension ecosystem. |
|
||||||
| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa. |
|
| [**indifferent broccoli**](https://indifferentbroccoli.com/) | indifferent broccoli is a game server hosting and rental company. With us, you get top-notch computer power for your gaming sessions. We destroy lag, latency, and complexity--letting you focus on the fun stuff. |
|
||||||
| [**Pterodactyl Market**](https://pterodactylmarket.com/) | Pterodactyl Market is a one-and-stop shop for Pterodactyl. In our market, you can find Add-ons, Themes, Eggs, and more for Pterodactyl. |
|
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
|
|||||||
@ -2,6 +2,7 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
@ -13,7 +14,6 @@ import (
|
|||||||
|
|
||||||
"github.com/AlecAivazis/survey/v2"
|
"github.com/AlecAivazis/survey/v2"
|
||||||
"github.com/AlecAivazis/survey/v2/terminal"
|
"github.com/AlecAivazis/survey/v2/terminal"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
@ -155,6 +155,9 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Manually specify the Panel URL as it won't be decoded from JSON.
|
||||||
|
cfg.PanelLocation = configureArgs.PanelURL
|
||||||
|
|
||||||
if err = config.WriteToDisk(cfg); err != nil {
|
if err = config.WriteToDisk(cfg); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|||||||
@ -2,6 +2,7 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@ -17,9 +18,9 @@ import (
|
|||||||
"github.com/AlecAivazis/survey/v2/terminal"
|
"github.com/AlecAivazis/survey/v2/terminal"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
|
dockersystem "github.com/docker/docker/api/types/system"
|
||||||
"github.com/docker/docker/pkg/parsers/kernel"
|
"github.com/docker/docker/pkg/parsers/kernel"
|
||||||
"github.com/docker/docker/pkg/parsers/operatingsystem"
|
"github.com/docker/docker/pkg/parsers/operatingsystem"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
@ -206,18 +207,18 @@ func diagnosticsCmdRun(*cobra.Command, []string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getDockerInfo() (types.Version, types.Info, error) {
|
func getDockerInfo() (types.Version, dockersystem.Info, error) {
|
||||||
client, err := environment.Docker()
|
client, err := environment.Docker()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, dockersystem.Info{}, err
|
||||||
}
|
}
|
||||||
dockerVersion, err := client.ServerVersion(context.Background())
|
dockerVersion, err := client.ServerVersion(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, dockersystem.Info{}, err
|
||||||
}
|
}
|
||||||
dockerInfo, err := client.Info(context.Background())
|
dockerInfo, err := client.Info(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, dockersystem.Info{}, err
|
||||||
}
|
}
|
||||||
return dockerVersion, dockerInfo, nil
|
return dockerVersion, dockerInfo, nil
|
||||||
}
|
}
|
||||||
@ -229,8 +230,8 @@ func uploadToHastebin(hbUrl, content string) (string, error) {
|
|||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
u.Path = path.Join(u.Path, "documents")
|
u.Path = path.Join(u.Path, "documents")
|
||||||
res, err := http.Post(u.String(), "plain/text", r)
|
res, err := http.Post(u.String(), "text/plain", r)
|
||||||
if err != nil || res.StatusCode != 200 {
|
if err != nil || res.StatusCode < 200 || res.StatusCode >= 300 {
|
||||||
fmt.Println("Failed to upload report to ", u.String(), err)
|
fmt.Println("Failed to upload report to ", u.String(), err)
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|||||||
35
cmd/root.go
35
cmd/root.go
@ -13,7 +13,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/NYTimes/logrotate"
|
"github.com/NYTimes/logrotate"
|
||||||
@ -104,6 +104,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
|
|
||||||
if err := config.ConfigureTimezone(); err != nil {
|
if err := config.ConfigureTimezone(); err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
|
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
log.WithField("timezone", config.Get().System.Timezone).Info("configured wings with system timezone")
|
log.WithField("timezone", config.Get().System.Timezone).Info("configured wings with system timezone")
|
||||||
if err := config.ConfigureDirectories(); err != nil {
|
if err := config.ConfigureDirectories(); err != nil {
|
||||||
@ -112,6 +113,11 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
}
|
}
|
||||||
if err := config.EnsurePterodactylUser(); err != nil {
|
if err := config.EnsurePterodactylUser(); err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to create pterodactyl system user")
|
log.WithField("error", err).Fatal("failed to create pterodactyl system user")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := config.ConfigurePasswd(); err != nil {
|
||||||
|
log.WithField("error", err).Fatal("failed to configure container passwd file")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
log.WithFields(log.Fields{
|
log.WithFields(log.Fields{
|
||||||
"username": config.Get().System.Username,
|
"username": config.Get().System.Username,
|
||||||
@ -123,9 +129,10 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
t := config.Get().Token
|
||||||
pclient := remote.New(
|
pclient := remote.New(
|
||||||
config.Get().PanelLocation,
|
config.Get().PanelLocation,
|
||||||
remote.WithCredentials(config.Get().AuthenticationTokenId, config.Get().AuthenticationToken),
|
remote.WithCredentials(t.ID, t.Token),
|
||||||
remote.WithHttpClient(&http.Client{
|
remote.WithHttpClient(&http.Client{
|
||||||
Timeout: time.Second * time.Duration(config.Get().RemoteQuery.Timeout),
|
Timeout: time.Second * time.Duration(config.Get().RemoteQuery.Timeout),
|
||||||
}),
|
}),
|
||||||
@ -133,19 +140,26 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
|
|
||||||
if err := database.Initialize(); err != nil {
|
if err := database.Initialize(); err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to initialize database")
|
log.WithField("error", err).Fatal("failed to initialize database")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
manager, err := server.NewManager(cmd.Context(), pclient)
|
manager, err := server.NewManager(cmd.Context(), pclient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to load server configurations")
|
log.WithField("error", err).Fatal("failed to load server configurations")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := environment.ConfigureDocker(cmd.Context()); err != nil {
|
if err := environment.ConfigureDocker(cmd.Context()); err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to configure docker environment")
|
log.WithField("error", err).Fatal("failed to configure docker environment")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := config.WriteToDisk(config.Get()); err != nil {
|
if err := config.WriteToDisk(config.Get()); err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to write configuration to disk")
|
if !errors.Is(err, syscall.EROFS) {
|
||||||
|
log.WithField("error", err).Error("failed to write configuration to disk")
|
||||||
|
} else {
|
||||||
|
log.WithField("error", err).Debug("failed to write configuration to disk")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Just for some nice log output.
|
// Just for some nice log output.
|
||||||
@ -379,13 +393,14 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
// Reads the configuration from the disk and then sets up the global singleton
|
// Reads the configuration from the disk and then sets up the global singleton
|
||||||
// with all the configuration values.
|
// with all the configuration values.
|
||||||
func initConfig() {
|
func initConfig() {
|
||||||
if !strings.HasPrefix(configPath, "/") {
|
if !filepath.IsAbs(configPath) {
|
||||||
d, err := os.Getwd()
|
d, err := filepath.Abs(configPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log2.Fatalf("cmd/root: could not determine directory: %s", err)
|
log2.Fatalf("cmd/root: failed to get path to config file: %s", err)
|
||||||
}
|
}
|
||||||
configPath = path.Clean(path.Join(d, configPath))
|
configPath = d
|
||||||
}
|
}
|
||||||
|
|
||||||
err := config.FromFile(configPath)
|
err := config.FromFile(configPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
@ -440,18 +455,18 @@ in all copies or substantial portions of the Software.%s`), system.Version, time
|
|||||||
}
|
}
|
||||||
|
|
||||||
func exitWithConfigurationNotice() {
|
func exitWithConfigurationNotice() {
|
||||||
fmt.Print(colorstring.Color(`
|
fmt.Printf(colorstring.Color(`
|
||||||
[_red_][white][bold]Error: Configuration File Not Found[reset]
|
[_red_][white][bold]Error: Configuration File Not Found[reset]
|
||||||
|
|
||||||
Wings was not able to locate your configuration file, and therefore is not
|
Wings was not able to locate your configuration file, and therefore is not
|
||||||
able to complete its boot process. Please ensure you have copied your instance
|
able to complete its boot process. Please ensure you have copied your instance
|
||||||
configuration file into the default location below.
|
configuration file into the default location below.
|
||||||
|
|
||||||
Default Location: /etc/pterodactyl/config.yml
|
Default Location: %s
|
||||||
|
|
||||||
[yellow]This is not a bug with this software. Please do not make a bug report
|
[yellow]This is not a bug with this software. Please do not make a bug report
|
||||||
for this issue, it will be closed.[reset]
|
for this issue, it will be closed.[reset]
|
||||||
|
|
||||||
`))
|
`), config.DefaultLocation)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|||||||
186
config/config.go
186
config/config.go
@ -1,6 +1,7 @@
|
|||||||
package config
|
package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -12,6 +13,7 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"text/template"
|
"text/template"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -20,6 +22,7 @@ import (
|
|||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/creasty/defaults"
|
"github.com/creasty/defaults"
|
||||||
"github.com/gbrlsnchs/jwt/v3"
|
"github.com/gbrlsnchs/jwt/v3"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
@ -87,7 +90,7 @@ type ApiConfiguration struct {
|
|||||||
// Determines if functionality for allowing remote download of files into server directories
|
// Determines if functionality for allowing remote download of files into server directories
|
||||||
// is enabled on this instance. If set to "true" remote downloads will not be possible for
|
// is enabled on this instance. If set to "true" remote downloads will not be possible for
|
||||||
// servers.
|
// servers.
|
||||||
DisableRemoteDownload bool `json:"disable_remote_download" yaml:"disable_remote_download"`
|
DisableRemoteDownload bool `json:"-" yaml:"disable_remote_download"`
|
||||||
|
|
||||||
// The maximum size for files uploaded through the Panel in MB.
|
// The maximum size for files uploaded through the Panel in MB.
|
||||||
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
|
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
|
||||||
@ -121,23 +124,23 @@ type RemoteQueryConfiguration struct {
|
|||||||
// SystemConfiguration defines basic system configuration settings.
|
// SystemConfiguration defines basic system configuration settings.
|
||||||
type SystemConfiguration struct {
|
type SystemConfiguration struct {
|
||||||
// The root directory where all of the pterodactyl data is stored at.
|
// The root directory where all of the pterodactyl data is stored at.
|
||||||
RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"`
|
RootDirectory string `default:"/var/lib/pterodactyl" json:"-" yaml:"root_directory"`
|
||||||
|
|
||||||
// Directory where logs for server installations and other wings events are logged.
|
// Directory where logs for server installations and other wings events are logged.
|
||||||
LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"`
|
LogDirectory string `default:"/var/log/pterodactyl" json:"-" yaml:"log_directory"`
|
||||||
|
|
||||||
// Directory where the server data is stored at.
|
// Directory where the server data is stored at.
|
||||||
Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"`
|
Data string `default:"/var/lib/pterodactyl/volumes" json:"-" yaml:"data"`
|
||||||
|
|
||||||
// Directory where server archives for transferring will be stored.
|
// Directory where server archives for transferring will be stored.
|
||||||
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"`
|
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" json:"-" yaml:"archive_directory"`
|
||||||
|
|
||||||
// Directory where local backups will be stored on the machine.
|
// Directory where local backups will be stored on the machine.
|
||||||
BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
|
BackupDirectory string `default:"/var/lib/pterodactyl/backups" json:"-" yaml:"backup_directory"`
|
||||||
|
|
||||||
// TmpDirectory specifies where temporary files for Pterodactyl installation processes
|
// TmpDirectory specifies where temporary files for Pterodactyl installation processes
|
||||||
// should be created. This supports environments running docker-in-docker.
|
// should be created. This supports environments running docker-in-docker.
|
||||||
TmpDirectory string `default:"/tmp/pterodactyl" yaml:"tmp_directory"`
|
TmpDirectory string `default:"/tmp/pterodactyl" json:"-" yaml:"tmp_directory"`
|
||||||
|
|
||||||
// The user that should own all of the server files, and be used for containers.
|
// The user that should own all of the server files, and be used for containers.
|
||||||
Username string `default:"pterodactyl" yaml:"username"`
|
Username string `default:"pterodactyl" yaml:"username"`
|
||||||
@ -170,6 +173,25 @@ type SystemConfiguration struct {
|
|||||||
Gid int `yaml:"gid"`
|
Gid int `yaml:"gid"`
|
||||||
} `yaml:"user"`
|
} `yaml:"user"`
|
||||||
|
|
||||||
|
// Passwd controls the mounting of a generated passwd files into containers started by Wings.
|
||||||
|
Passwd struct {
|
||||||
|
// Enable controls whether generated passwd files should be mounted into containers.
|
||||||
|
//
|
||||||
|
// By default this option is disabled and Wings will not mount any additional passwd
|
||||||
|
// files into containers.
|
||||||
|
Enable bool `yaml:"enabled" default:"false"`
|
||||||
|
|
||||||
|
// Directory is the directory on disk where the generated files will be stored.
|
||||||
|
// This directory may be temporary as it will be re-created whenever Wings is started.
|
||||||
|
//
|
||||||
|
// This path **WILL** be both written to by Wings and mounted into containers created by
|
||||||
|
// Wings. If you are running Wings itself in a container, this path will need to be mounted
|
||||||
|
// into the Wings container as the exact path on the host, which should match the value
|
||||||
|
// specified here. If you are using SELinux, you will need to make sure this file has the
|
||||||
|
// correct SELinux context in order for containers to use it.
|
||||||
|
Directory string `yaml:"directory" default:"/run/wings/etc"`
|
||||||
|
} `yaml:"passwd"`
|
||||||
|
|
||||||
// The amount of time in seconds that can elapse before a server's disk space calculation is
|
// The amount of time in seconds that can elapse before a server's disk space calculation is
|
||||||
// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
|
// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
|
||||||
// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
|
// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
|
||||||
@ -209,6 +231,8 @@ type SystemConfiguration struct {
|
|||||||
Backups Backups `yaml:"backups"`
|
Backups Backups `yaml:"backups"`
|
||||||
|
|
||||||
Transfers Transfers `yaml:"transfers"`
|
Transfers Transfers `yaml:"transfers"`
|
||||||
|
|
||||||
|
OpenatMode string `default:"auto" yaml:"openat_mode"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type CrashDetection struct {
|
type CrashDetection struct {
|
||||||
@ -271,7 +295,14 @@ type ConsoleThrottles struct {
|
|||||||
Period uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"`
|
Period uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Token struct {
|
||||||
|
ID string
|
||||||
|
Token string
|
||||||
|
}
|
||||||
|
|
||||||
type Configuration struct {
|
type Configuration struct {
|
||||||
|
Token Token `json:"-" yaml:"-"`
|
||||||
|
|
||||||
// The location from which this configuration instance was instantiated.
|
// The location from which this configuration instance was instantiated.
|
||||||
path string
|
path string
|
||||||
|
|
||||||
@ -302,7 +333,7 @@ type Configuration struct {
|
|||||||
|
|
||||||
// The location where the panel is running that this daemon should connect to
|
// The location where the panel is running that this daemon should connect to
|
||||||
// to collect data and send events.
|
// to collect data and send events.
|
||||||
PanelLocation string `json:"remote" yaml:"remote"`
|
PanelLocation string `json:"-" yaml:"remote"`
|
||||||
RemoteQuery RemoteQueryConfiguration `json:"remote_query" yaml:"remote_query"`
|
RemoteQuery RemoteQueryConfiguration `json:"remote_query" yaml:"remote_query"`
|
||||||
|
|
||||||
// AllowedMounts is a list of allowed host-system mount points.
|
// AllowedMounts is a list of allowed host-system mount points.
|
||||||
@ -344,11 +375,16 @@ func NewAtPath(path string) (*Configuration, error) {
|
|||||||
// will be paused until it is complete.
|
// will be paused until it is complete.
|
||||||
func Set(c *Configuration) {
|
func Set(c *Configuration) {
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
if _config == nil || _config.AuthenticationToken != c.AuthenticationToken {
|
defer mu.Unlock()
|
||||||
_jwtAlgo = jwt.NewHS256([]byte(c.AuthenticationToken))
|
token := c.Token.Token
|
||||||
|
if token == "" {
|
||||||
|
c.Token.Token = c.AuthenticationToken
|
||||||
|
token = c.Token.Token
|
||||||
|
}
|
||||||
|
if _config == nil || _config.Token.Token != token {
|
||||||
|
_jwtAlgo = jwt.NewHS256([]byte(token))
|
||||||
}
|
}
|
||||||
_config = c
|
_config = c
|
||||||
mu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetDebugViaFlag tracks if the application is running in debug mode because of
|
// SetDebugViaFlag tracks if the application is running in debug mode because of
|
||||||
@ -356,9 +392,9 @@ func Set(c *Configuration) {
|
|||||||
// change to the disk.
|
// change to the disk.
|
||||||
func SetDebugViaFlag(d bool) {
|
func SetDebugViaFlag(d bool) {
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
_config.Debug = d
|
_config.Debug = d
|
||||||
_debugViaFlag = d
|
_debugViaFlag = d
|
||||||
mu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the global configuration instance. This is a thread-safe operation
|
// Get returns the global configuration instance. This is a thread-safe operation
|
||||||
@ -383,8 +419,8 @@ func Get() *Configuration {
|
|||||||
// the global configuration.
|
// the global configuration.
|
||||||
func Update(callback func(c *Configuration)) {
|
func Update(callback func(c *Configuration)) {
|
||||||
mu.Lock()
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
callback(_config)
|
callback(_config)
|
||||||
mu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetJwtAlgorithm returns the in-memory JWT algorithm.
|
// GetJwtAlgorithm returns the in-memory JWT algorithm.
|
||||||
@ -493,6 +529,37 @@ func EnsurePterodactylUser() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ConfigurePasswd generates required passwd files for use with containers started by Wings.
|
||||||
|
func ConfigurePasswd() error {
|
||||||
|
passwd := _config.System.Passwd
|
||||||
|
if !passwd.Enable {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
v := []byte(fmt.Sprintf(
|
||||||
|
`root:x:0:
|
||||||
|
container:x:%d:
|
||||||
|
nogroup:x:65534:`,
|
||||||
|
_config.System.User.Gid,
|
||||||
|
))
|
||||||
|
if err := os.WriteFile(filepath.Join(passwd.Directory, "group"), v, 0o644); err != nil {
|
||||||
|
return fmt.Errorf("failed to write file to %s/group: %v", passwd.Directory, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
v = []byte(fmt.Sprintf(
|
||||||
|
`root:x:0:0::/root:/bin/sh
|
||||||
|
container:x:%d:%d::/home/container:/bin/sh
|
||||||
|
nobody:x:65534:65534::/var/empty:/bin/sh
|
||||||
|
`,
|
||||||
|
_config.System.User.Uid,
|
||||||
|
_config.System.User.Gid,
|
||||||
|
))
|
||||||
|
if err := os.WriteFile(filepath.Join(passwd.Directory, "passwd"), v, 0o644); err != nil {
|
||||||
|
return fmt.Errorf("failed to write file to %s/passwd: %v", passwd.Directory, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// FromFile reads the configuration from the provided file and stores it in the
|
// FromFile reads the configuration from the provided file and stores it in the
|
||||||
// global singleton for this instance.
|
// global singleton for this instance.
|
||||||
func FromFile(path string) error {
|
func FromFile(path string) error {
|
||||||
@ -509,6 +576,26 @@ func FromFile(path string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.Token = Token{
|
||||||
|
ID: os.Getenv("WINGS_TOKEN_ID"),
|
||||||
|
Token: os.Getenv("WINGS_TOKEN"),
|
||||||
|
}
|
||||||
|
if c.Token.ID == "" {
|
||||||
|
c.Token.ID = c.AuthenticationTokenId
|
||||||
|
}
|
||||||
|
if c.Token.Token == "" {
|
||||||
|
c.Token.Token = c.AuthenticationToken
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Token.ID, err = Expand(c.Token.ID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.Token.Token, err = Expand(c.Token.Token)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// Store this configuration in the global state.
|
// Store this configuration in the global state.
|
||||||
Set(c)
|
Set(c)
|
||||||
return nil
|
return nil
|
||||||
@ -557,6 +644,13 @@ func ConfigureDirectories() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if _config.System.Passwd.Enable {
|
||||||
|
log.WithField("path", _config.System.Passwd.Directory).Debug("ensuring passwd directory exists")
|
||||||
|
if err := os.MkdirAll(_config.System.Passwd.Directory, 0o755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -671,3 +765,69 @@ func getSystemName() (string, error) {
|
|||||||
}
|
}
|
||||||
return release["ID"], nil
|
return release["ID"], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
openat2 atomic.Bool
|
||||||
|
openat2Set atomic.Bool
|
||||||
|
)
|
||||||
|
|
||||||
|
func UseOpenat2() bool {
|
||||||
|
if openat2Set.Load() {
|
||||||
|
return openat2.Load()
|
||||||
|
}
|
||||||
|
defer openat2Set.Store(true)
|
||||||
|
|
||||||
|
c := Get()
|
||||||
|
openatMode := c.System.OpenatMode
|
||||||
|
switch openatMode {
|
||||||
|
case "openat2":
|
||||||
|
openat2.Store(true)
|
||||||
|
return true
|
||||||
|
case "openat":
|
||||||
|
openat2.Store(false)
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
fd, err := unix.Openat2(unix.AT_FDCWD, "/", &unix.OpenHow{})
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Warn("error occurred while checking for openat2 support, falling back to openat")
|
||||||
|
openat2.Store(false)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
_ = unix.Close(fd)
|
||||||
|
openat2.Store(true)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expand expands an input string by calling [os.ExpandEnv] to expand all
|
||||||
|
// environment variables, then checks if the value is prefixed with `file://`
|
||||||
|
// to support reading the value from a file.
|
||||||
|
//
|
||||||
|
// NOTE: the order of expanding environment variables first then checking if
|
||||||
|
// the value references a file is important. This behaviour allows a user to
|
||||||
|
// pass a value like `file://${CREDENTIALS_DIRECTORY}/token` to allow us to
|
||||||
|
// work with credentials loaded by systemd's `LoadCredential` (or `LoadCredentialEncrypted`)
|
||||||
|
// options without the user needing to assume the path of `CREDENTIALS_DIRECTORY`
|
||||||
|
// or use a preStart script to read the files for us.
|
||||||
|
func Expand(v string) (string, error) {
|
||||||
|
// Expand environment variables within the string.
|
||||||
|
//
|
||||||
|
// NOTE: this may cause issues if the string contains `$` and doesn't intend
|
||||||
|
// on getting expanded, however we are using this for our tokens which are
|
||||||
|
// all alphanumeric characters only.
|
||||||
|
v = os.ExpandEnv(v)
|
||||||
|
|
||||||
|
// Handle files.
|
||||||
|
const filePrefix = "file://"
|
||||||
|
if strings.HasPrefix(v, filePrefix) {
|
||||||
|
p := v[len(filePrefix):]
|
||||||
|
|
||||||
|
b, err := os.ReadFile(p)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
v = string(bytes.TrimRight(bytes.TrimRight(b, "\r"), "\n"))
|
||||||
|
}
|
||||||
|
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|||||||
@ -2,11 +2,11 @@ package config
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/goccy/go-json"
|
"github.com/docker/docker/api/types/registry"
|
||||||
)
|
)
|
||||||
|
|
||||||
type dockerNetworkInterfaces struct {
|
type dockerNetworkInterfaces struct {
|
||||||
@ -115,7 +115,7 @@ type RegistryConfiguration struct {
|
|||||||
// Base64 returns the authentication for a given registry as a base64 encoded
|
// Base64 returns the authentication for a given registry as a base64 encoded
|
||||||
// string value.
|
// string value.
|
||||||
func (c RegistryConfiguration) Base64() (string, error) {
|
func (c RegistryConfiguration) Base64() (string, error) {
|
||||||
b, err := json.Marshal(types.AuthConfig{
|
b, err := json.Marshal(registry.AuthConfig{
|
||||||
Username: c.Username,
|
Username: c.Username,
|
||||||
Password: c.Password,
|
Password: c.Password,
|
||||||
})
|
})
|
||||||
|
|||||||
@ -7,7 +7,6 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/api/types/network"
|
"github.com/docker/docker/api/types/network"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
|
|
||||||
@ -39,7 +38,7 @@ func ConfigureDocker(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
nw := config.Get().Docker.Network
|
nw := config.Get().Docker.Network
|
||||||
resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
|
resource, err := cli.NetworkInspect(ctx, nw.Name, network.InspectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !client.IsErrNotFound(err) {
|
if !client.IsErrNotFound(err) {
|
||||||
return err
|
return err
|
||||||
@ -72,9 +71,10 @@ func ConfigureDocker(ctx context.Context) error {
|
|||||||
// Creates a new network on the machine if one does not exist already.
|
// Creates a new network on the machine if one does not exist already.
|
||||||
func createDockerNetwork(ctx context.Context, cli *client.Client) error {
|
func createDockerNetwork(ctx context.Context, cli *client.Client) error {
|
||||||
nw := config.Get().Docker.Network
|
nw := config.Get().Docker.Network
|
||||||
_, err := cli.NetworkCreate(ctx, nw.Name, types.NetworkCreate{
|
enableIPv6 := true
|
||||||
|
_, err := cli.NetworkCreate(ctx, nw.Name, network.CreateOptions{
|
||||||
Driver: nw.Driver,
|
Driver: nw.Driver,
|
||||||
EnableIPv6: true,
|
EnableIPv6: &enableIPv6,
|
||||||
Internal: nw.IsInternal,
|
Internal: nw.IsInternal,
|
||||||
IPAM: &network.IPAM{
|
IPAM: &network.IPAM{
|
||||||
Config: []network.IPAMConfig{{
|
Config: []network.IPAMConfig{{
|
||||||
|
|||||||
@ -2,6 +2,7 @@ package docker
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"reflect"
|
"reflect"
|
||||||
@ -13,7 +14,6 @@ import (
|
|||||||
"github.com/docker/docker/api/types/versions"
|
"github.com/docker/docker/api/types/versions"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
)
|
)
|
||||||
|
|||||||
@ -12,9 +12,10 @@ import (
|
|||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/buger/jsonparser"
|
"github.com/buger/jsonparser"
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
|
"github.com/docker/docker/api/types/image"
|
||||||
"github.com/docker/docker/api/types/mount"
|
"github.com/docker/docker/api/types/mount"
|
||||||
|
"github.com/docker/docker/api/types/network"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
@ -49,7 +50,7 @@ func (e *Environment) Attach(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
opts := types.ContainerAttachOptions{
|
opts := container.AttachOptions{
|
||||||
Stdin: true,
|
Stdin: true,
|
||||||
Stdout: true,
|
Stdout: true,
|
||||||
Stderr: true,
|
Stderr: true,
|
||||||
@ -103,7 +104,7 @@ func (e *Environment) Attach(ctx context.Context) error {
|
|||||||
// container. This allows memory, cpu, and IO limitations to be adjusted on the
|
// container. This allows memory, cpu, and IO limitations to be adjusted on the
|
||||||
// fly for individual instances.
|
// fly for individual instances.
|
||||||
func (e *Environment) InSituUpdate() error {
|
func (e *Environment) InSituUpdate() error {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
if _, err := e.ContainerInspect(ctx); err != nil {
|
if _, err := e.ContainerInspect(ctx); err != nil {
|
||||||
@ -199,14 +200,15 @@ func (e *Environment) Create() error {
|
|||||||
networkName := "ip-" + strings.ReplaceAll(strings.ReplaceAll(a.DefaultMapping.Ip, ".", "-"), ":", "-")
|
networkName := "ip-" + strings.ReplaceAll(strings.ReplaceAll(a.DefaultMapping.Ip, ".", "-"), ":", "-")
|
||||||
networkMode = container.NetworkMode(networkName)
|
networkMode = container.NetworkMode(networkName)
|
||||||
|
|
||||||
if _, err := e.client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}); err != nil {
|
if _, err := e.client.NetworkInspect(ctx, networkName, network.InspectOptions{}); err != nil {
|
||||||
if !client.IsErrNotFound(err) {
|
if !client.IsErrNotFound(err) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := e.client.NetworkCreate(ctx, networkName, types.NetworkCreate{
|
enableIPv6 := false
|
||||||
|
if _, err := e.client.NetworkCreate(ctx, networkName, network.CreateOptions{
|
||||||
Driver: "bridge",
|
Driver: "bridge",
|
||||||
EnableIPv6: false,
|
EnableIPv6: &enableIPv6,
|
||||||
Internal: false,
|
Internal: false,
|
||||||
Attachable: false,
|
Attachable: false,
|
||||||
Ingress: false,
|
Ingress: false,
|
||||||
@ -270,7 +272,7 @@ func (e *Environment) Destroy() error {
|
|||||||
// We set it to stopping than offline to prevent crash detection from being triggered.
|
// We set it to stopping than offline to prevent crash detection from being triggered.
|
||||||
e.SetState(environment.ProcessStoppingState)
|
e.SetState(environment.ProcessStoppingState)
|
||||||
|
|
||||||
err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{
|
err := e.client.ContainerRemove(context.Background(), e.Id, container.RemoveOptions{
|
||||||
RemoveVolumes: true,
|
RemoveVolumes: true,
|
||||||
RemoveLinks: false,
|
RemoveLinks: false,
|
||||||
Force: true,
|
Force: true,
|
||||||
@ -316,7 +318,7 @@ func (e *Environment) SendCommand(c string) error {
|
|||||||
// is running or not, it will simply try to read the last X bytes of the file
|
// is running or not, it will simply try to read the last X bytes of the file
|
||||||
// and return them.
|
// and return them.
|
||||||
func (e *Environment) Readlog(lines int) ([]string, error) {
|
func (e *Environment) Readlog(lines int) ([]string, error) {
|
||||||
r, err := e.client.ContainerLogs(context.Background(), e.Id, types.ContainerLogsOptions{
|
r, err := e.client.ContainerLogs(context.Background(), e.Id, container.LogsOptions{
|
||||||
ShowStdout: true,
|
ShowStdout: true,
|
||||||
ShowStderr: true,
|
ShowStderr: true,
|
||||||
Tail: strconv.Itoa(lines),
|
Tail: strconv.Itoa(lines),
|
||||||
@ -343,25 +345,25 @@ func (e *Environment) Readlog(lines int) ([]string, error) {
|
|||||||
// late, and we don't need to block all the servers from booting just because
|
// late, and we don't need to block all the servers from booting just because
|
||||||
// of that. I'd imagine in a lot of cases an outage shouldn't affect users too
|
// of that. I'd imagine in a lot of cases an outage shouldn't affect users too
|
||||||
// badly. It'll at least keep existing servers working correctly if anything.
|
// badly. It'll at least keep existing servers working correctly if anything.
|
||||||
func (e *Environment) ensureImageExists(image string) error {
|
func (e *Environment) ensureImageExists(img string) error {
|
||||||
e.Events().Publish(environment.DockerImagePullStarted, "")
|
e.Events().Publish(environment.DockerImagePullStarted, "")
|
||||||
defer e.Events().Publish(environment.DockerImagePullCompleted, "")
|
defer e.Events().Publish(environment.DockerImagePullCompleted, "")
|
||||||
|
|
||||||
// Images prefixed with a ~ are local images that we do not need to try and pull.
|
// Images prefixed with a ~ are local images that we do not need to try and pull.
|
||||||
if strings.HasPrefix(image, "~") {
|
if strings.HasPrefix(img, "~") {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an
|
// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an
|
||||||
// image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull
|
// image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull
|
||||||
// an image. Let me know when I am inevitably wrong here...
|
// an image. Let me know when I am inevitably wrong here...
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
// Get a registry auth configuration from the config.
|
// Get a registry auth configuration from the config.
|
||||||
var registryAuth *config.RegistryConfiguration
|
var registryAuth *config.RegistryConfiguration
|
||||||
for registry, c := range config.Get().Docker.Registries {
|
for registry, c := range config.Get().Docker.Registries {
|
||||||
if !strings.HasPrefix(image, registry) {
|
if !strings.HasPrefix(img, registry) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -371,7 +373,7 @@ func (e *Environment) ensureImageExists(image string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get the ImagePullOptions.
|
// Get the ImagePullOptions.
|
||||||
imagePullOptions := types.ImagePullOptions{All: false}
|
imagePullOptions := image.PullOptions{All: false}
|
||||||
if registryAuth != nil {
|
if registryAuth != nil {
|
||||||
b64, err := registryAuth.Base64()
|
b64, err := registryAuth.Base64()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -382,23 +384,23 @@ func (e *Environment) ensureImageExists(image string) error {
|
|||||||
imagePullOptions.RegistryAuth = b64
|
imagePullOptions.RegistryAuth = b64
|
||||||
}
|
}
|
||||||
|
|
||||||
out, err := e.client.ImagePull(ctx, image, imagePullOptions)
|
out, err := e.client.ImagePull(ctx, img, imagePullOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
images, ierr := e.client.ImageList(ctx, types.ImageListOptions{})
|
images, ierr := e.client.ImageList(ctx, image.ListOptions{})
|
||||||
if ierr != nil {
|
if ierr != nil {
|
||||||
// Well damn, something has gone really wrong here, just go ahead and abort there
|
// Well damn, something has gone really wrong here, just go ahead and abort there
|
||||||
// isn't much anything we can do to try and self-recover from this.
|
// isn't much anything we can do to try and self-recover from this.
|
||||||
return errors.Wrap(ierr, "environment/docker: failed to list images")
|
return errors.Wrap(ierr, "environment/docker: failed to list images")
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, img := range images {
|
for _, img2 := range images {
|
||||||
for _, t := range img.RepoTags {
|
for _, t := range img2.RepoTags {
|
||||||
if t != image {
|
if t != img {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
log.WithFields(log.Fields{
|
log.WithFields(log.Fields{
|
||||||
"image": image,
|
"image": img,
|
||||||
"container_id": e.Id,
|
"container_id": e.Id,
|
||||||
"err": err.Error(),
|
"err": err.Error(),
|
||||||
}).Warn("unable to pull requested image from remote source, however the image exists locally")
|
}).Warn("unable to pull requested image from remote source, however the image exists locally")
|
||||||
@ -409,11 +411,11 @@ func (e *Environment) ensureImageExists(image string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return errors.Wrapf(err, "environment/docker: failed to pull \"%s\" image for server", image)
|
return errors.Wrapf(err, "environment/docker: failed to pull \"%s\" image for server", img)
|
||||||
}
|
}
|
||||||
defer out.Close()
|
defer out.Close()
|
||||||
|
|
||||||
log.WithField("image", image).Debug("pulling docker image... this could take a bit of time")
|
log.WithField("image", img).Debug("pulling docker image... this could take a bit of time")
|
||||||
|
|
||||||
// I'm not sure what the best approach here is, but this will block execution until the image
|
// I'm not sure what the best approach here is, but this will block execution until the image
|
||||||
// is done being pulled, which is what we need.
|
// is done being pulled, which is what we need.
|
||||||
@ -431,22 +433,21 @@ func (e *Environment) ensureImageExists(image string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.WithField("image", image).Debug("completed docker image pull")
|
log.WithField("image", img).Debug("completed docker image pull")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Environment) convertMounts() []mount.Mount {
|
func (e *Environment) convertMounts() []mount.Mount {
|
||||||
var out []mount.Mount
|
mounts := e.Configuration.Mounts()
|
||||||
|
out := make([]mount.Mount, len(mounts))
|
||||||
for _, m := range e.Configuration.Mounts() {
|
for i, m := range mounts {
|
||||||
out = append(out, mount.Mount{
|
out[i] = mount.Mount{
|
||||||
Type: mount.TypeBind,
|
Type: mount.TypeBind,
|
||||||
Source: m.Source,
|
Source: m.Source,
|
||||||
Target: m.Target,
|
Target: m.Target,
|
||||||
ReadOnly: m.ReadOnly,
|
ReadOnly: m.ReadOnly,
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|||||||
@ -4,12 +4,10 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
|
|
||||||
@ -27,7 +25,7 @@ import (
|
|||||||
// is running does not result in the server becoming un-bootable.
|
// is running does not result in the server becoming un-bootable.
|
||||||
func (e *Environment) OnBeforeStart(ctx context.Context) error {
|
func (e *Environment) OnBeforeStart(ctx context.Context) error {
|
||||||
// Always destroy and re-create the server container to ensure that synced data from the Panel is used.
|
// Always destroy and re-create the server container to ensure that synced data from the Panel is used.
|
||||||
if err := e.client.ContainerRemove(ctx, e.Id, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
|
if err := e.client.ContainerRemove(ctx, e.Id, container.RemoveOptions{RemoveVolumes: true}); err != nil {
|
||||||
if !client.IsErrNotFound(err) {
|
if !client.IsErrNotFound(err) {
|
||||||
return errors.WrapIf(err, "environment/docker: failed to remove container during pre-boot")
|
return errors.WrapIf(err, "environment/docker: failed to remove container during pre-boot")
|
||||||
}
|
}
|
||||||
@ -122,7 +120,7 @@ func (e *Environment) Start(ctx context.Context) error {
|
|||||||
return errors.WrapIf(err, "environment/docker: failed to attach to container")
|
return errors.WrapIf(err, "environment/docker: failed to attach to container")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := e.client.ContainerStart(actx, e.Id, types.ContainerStartOptions{}); err != nil {
|
if err := e.client.ContainerStart(actx, e.Id, container.StartOptions{}); err != nil {
|
||||||
return errors.WrapIf(err, "environment/docker: failed to start container")
|
return errors.WrapIf(err, "environment/docker: failed to start container")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -143,42 +141,49 @@ func (e *Environment) Stop(ctx context.Context) error {
|
|||||||
s := e.meta.Stop
|
s := e.meta.Stop
|
||||||
e.mu.RUnlock()
|
e.mu.RUnlock()
|
||||||
|
|
||||||
// A native "stop" as the Type field value will just skip over all of this
|
|
||||||
// logic and end up only executing the container stop command (which may or
|
|
||||||
// may not work as expected).
|
|
||||||
if s.Type == "" || s.Type == remote.ProcessStopSignal {
|
|
||||||
if s.Type == "" {
|
|
||||||
log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using termination procedure")
|
|
||||||
}
|
|
||||||
|
|
||||||
signal := os.Kill
|
|
||||||
// Handle a few common cases, otherwise just fall through and just pass along
|
|
||||||
// the os.Kill signal to the process.
|
|
||||||
switch strings.ToUpper(s.Value) {
|
|
||||||
case "SIGABRT":
|
|
||||||
signal = syscall.SIGABRT
|
|
||||||
case "SIGINT":
|
|
||||||
signal = syscall.SIGINT
|
|
||||||
case "SIGTERM":
|
|
||||||
signal = syscall.SIGTERM
|
|
||||||
}
|
|
||||||
return e.Terminate(ctx, signal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the process is already offline don't switch it back to stopping. Just leave it how
|
// If the process is already offline don't switch it back to stopping. Just leave it how
|
||||||
// it is and continue through to the stop handling for the process.
|
// it is and continue through to the stop handling for the process.
|
||||||
if e.st.Load() != environment.ProcessOfflineState {
|
if e.st.Load() != environment.ProcessOfflineState {
|
||||||
e.SetState(environment.ProcessStoppingState)
|
e.SetState(environment.ProcessStoppingState)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Handle signal based actions
|
||||||
|
if s.Type == remote.ProcessStopSignal {
|
||||||
|
log.WithField("signal_value", s.Value).Debug("stopping server using signal")
|
||||||
|
|
||||||
|
// Handle some common signals - Default to SIGKILL
|
||||||
|
signal := "SIGKILL"
|
||||||
|
switch strings.ToUpper(s.Value) {
|
||||||
|
case "SIGABRT":
|
||||||
|
signal = "SIGABRT"
|
||||||
|
case "SIGINT", "C":
|
||||||
|
signal = "SIGINT"
|
||||||
|
case "SIGTERM":
|
||||||
|
signal = "SIGTERM"
|
||||||
|
case "SIGKILL":
|
||||||
|
signal = "SIGKILL"
|
||||||
|
default:
|
||||||
|
log.Info("Unrecognised signal requested, defaulting to SIGKILL")
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.SignalContainer(ctx, signal)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle command based stops
|
||||||
// Only attempt to send the stop command to the instance if we are actually attached to
|
// Only attempt to send the stop command to the instance if we are actually attached to
|
||||||
// the instance. If we are not for some reason, just send the container stop event.
|
// the instance. If we are not for some reason, just send the container stop event.
|
||||||
if e.IsAttached() && s.Type == remote.ProcessStopCommand {
|
if e.IsAttached() && s.Type == remote.ProcessStopCommand {
|
||||||
return e.SendCommand(s.Value)
|
return e.SendCommand(s.Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Allow the stop action to run for however long it takes, similar to executing a command
|
if s.Type == "" {
|
||||||
// and using a different logic pathway to wait for the container to stop successfully.
|
log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using native docker stop")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback to a native docker stop. As we aren't passing a signal to ContainerStop docker will
|
||||||
|
// attempt to stop the container using the default stop signal, SIGTERM, unless
|
||||||
|
// another signal was specified in the Dockerfile
|
||||||
//
|
//
|
||||||
// Using a negative timeout here will allow the container to stop gracefully,
|
// Using a negative timeout here will allow the container to stop gracefully,
|
||||||
// rather than forcefully terminating it. Value is in seconds, but -1 is
|
// rather than forcefully terminating it. Value is in seconds, but -1 is
|
||||||
@ -224,7 +229,7 @@ func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, t
|
|||||||
|
|
||||||
doTermination := func(s string) error {
|
doTermination := func(s string) error {
|
||||||
e.log().WithField("step", s).WithField("duration", duration).Warn("container stop did not complete in time, terminating process...")
|
e.log().WithField("step", s).WithField("duration", duration).Warn("container stop did not complete in time, terminating process...")
|
||||||
return e.Terminate(ctx, os.Kill)
|
return e.Terminate(ctx, "SIGKILL")
|
||||||
}
|
}
|
||||||
|
|
||||||
// We pass through the timed context for this stop action so that if one of the
|
// We pass through the timed context for this stop action so that if one of the
|
||||||
@ -268,8 +273,8 @@ func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, t
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Terminate forcefully terminates the container using the signal provided.
|
// Sends the specified signal to the container in an attempt to stop it.
|
||||||
func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
|
func (e *Environment) SignalContainer(ctx context.Context, signal string) error {
|
||||||
c, err := e.ContainerInspect(ctx)
|
c, err := e.ContainerInspect(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Treat missing containers as an okay error state, means it is obviously
|
// Treat missing containers as an okay error state, means it is obviously
|
||||||
@ -294,10 +299,23 @@ func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
|
|||||||
|
|
||||||
// We set it to stopping than offline to prevent crash detection from being triggered.
|
// We set it to stopping than offline to prevent crash detection from being triggered.
|
||||||
e.SetState(environment.ProcessStoppingState)
|
e.SetState(environment.ProcessStoppingState)
|
||||||
sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
|
if err := e.client.ContainerKill(ctx, e.Id, signal); err != nil && !client.IsErrNotFound(err) {
|
||||||
if err := e.client.ContainerKill(ctx, e.Id, sig); err != nil && !client.IsErrNotFound(err) {
|
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Terminate forcefully terminates the container using the signal provided.
|
||||||
|
// then sets its state to stopped.
|
||||||
|
func (e *Environment) Terminate(ctx context.Context, signal string) error {
|
||||||
|
// Send the signal to the container to kill it
|
||||||
|
if err := e.SignalContainer(ctx, signal); err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We expect Terminate to instantly kill the container
|
||||||
|
// so go ahead and mark it as dead and clean up
|
||||||
e.SetState(environment.ProcessOfflineState)
|
e.SetState(environment.ProcessOfflineState)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@ -2,13 +2,13 @@ package docker
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
)
|
)
|
||||||
@ -57,7 +57,7 @@ func (e *Environment) pollResources(ctx context.Context) error {
|
|||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
default:
|
default:
|
||||||
var v types.StatsJSON
|
var v container.StatsResponse
|
||||||
if err := dec.Decode(&v); err != nil {
|
if err := dec.Decode(&v); err != nil {
|
||||||
if err != io.EOF && !errors.Is(err, context.Canceled) {
|
if err != io.EOF && !errors.Is(err, context.Canceled) {
|
||||||
e.log().WithField("error", err).Warn("error while processing Docker stats output for container")
|
e.log().WithField("error", err).Warn("error while processing Docker stats output for container")
|
||||||
@ -95,7 +95,7 @@ func (e *Environment) pollResources(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The "docker stats" CLI call does not return the same value as the types.MemoryStats.Usage
|
// The "docker stats" CLI call does not return the same value as the [container.MemoryStats].Usage
|
||||||
// value which can be rather confusing to people trying to compare panel usage to
|
// value which can be rather confusing to people trying to compare panel usage to
|
||||||
// their stats output.
|
// their stats output.
|
||||||
//
|
//
|
||||||
@ -103,7 +103,7 @@ func (e *Environment) pollResources(ctx context.Context) error {
|
|||||||
// bothering me about it. It should also reflect a slightly more correct memory value anyways.
|
// bothering me about it. It should also reflect a slightly more correct memory value anyways.
|
||||||
//
|
//
|
||||||
// @see https://github.com/docker/cli/blob/96e1d1d6/cli/command/container/stats_helpers.go#L227-L249
|
// @see https://github.com/docker/cli/blob/96e1d1d6/cli/command/container/stats_helpers.go#L227-L249
|
||||||
func calculateDockerMemory(stats types.MemoryStats) uint64 {
|
func calculateDockerMemory(stats container.MemoryStats) uint64 {
|
||||||
if v, ok := stats.Stats["total_inactive_file"]; ok && v < stats.Usage {
|
if v, ok := stats.Stats["total_inactive_file"]; ok && v < stats.Usage {
|
||||||
return stats.Usage - v
|
return stats.Usage - v
|
||||||
}
|
}
|
||||||
@ -119,7 +119,7 @@ func calculateDockerMemory(stats types.MemoryStats) uint64 {
|
|||||||
// by the defined CPU limits on the container.
|
// by the defined CPU limits on the container.
|
||||||
//
|
//
|
||||||
// @see https://github.com/docker/cli/blob/aa097cf1aa19099da70930460250797c8920b709/cli/command/container/stats_helpers.go#L166
|
// @see https://github.com/docker/cli/blob/aa097cf1aa19099da70930460250797c8920b709/cli/command/container/stats_helpers.go#L166
|
||||||
func calculateDockerAbsoluteCpu(pStats types.CPUStats, stats types.CPUStats) float64 {
|
func calculateDockerAbsoluteCpu(pStats container.CPUStats, stats container.CPUStats) float64 {
|
||||||
// Calculate the change in CPU usage between the current and previous reading.
|
// Calculate the change in CPU usage between the current and previous reading.
|
||||||
cpuDelta := float64(stats.CPUUsage.TotalUsage) - float64(pStats.CPUUsage.TotalUsage)
|
cpuDelta := float64(stats.CPUUsage.TotalUsage) - float64(pStats.CPUUsage.TotalUsage)
|
||||||
|
|
||||||
|
|||||||
@ -2,7 +2,6 @@ package environment
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/events"
|
"github.com/pterodactyl/wings/events"
|
||||||
@ -72,7 +71,7 @@ type ProcessEnvironment interface {
|
|||||||
|
|
||||||
// Terminate stops a running server instance using the provided signal. This function
|
// Terminate stops a running server instance using the provided signal. This function
|
||||||
// is a no-op if the server is already stopped.
|
// is a no-op if the server is already stopped.
|
||||||
Terminate(ctx context.Context, signal os.Signal) error
|
Terminate(ctx context.Context, signal string) error
|
||||||
|
|
||||||
// Destroys the environment removing any containers that were created (in Docker
|
// Destroys the environment removing any containers that were created (in Docker
|
||||||
// environments at least).
|
// environments at least).
|
||||||
|
|||||||
@ -34,7 +34,7 @@ type Mount struct {
|
|||||||
// Limits is the build settings for a given server that impact docker container
|
// Limits is the build settings for a given server that impact docker container
|
||||||
// creation and resource limits for a server instance.
|
// creation and resource limits for a server instance.
|
||||||
type Limits struct {
|
type Limits struct {
|
||||||
// The total amount of memory in megabytes that this server is allowed to
|
// The total amount of memory in mebibytes that this server is allowed to
|
||||||
// use on the host system.
|
// use on the host system.
|
||||||
MemoryLimit int64 `json:"memory_limit"`
|
MemoryLimit int64 `json:"memory_limit"`
|
||||||
|
|
||||||
@ -79,7 +79,7 @@ func (l Limits) MemoryOverheadMultiplier() float64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (l Limits) BoundedMemoryLimit() int64 {
|
func (l Limits) BoundedMemoryLimit() int64 {
|
||||||
return int64(math.Round(float64(l.MemoryLimit) * l.MemoryOverheadMultiplier() * 1_000_000))
|
return int64(math.Round(float64(l.MemoryLimit) * l.MemoryOverheadMultiplier() * 1024 * 1024))
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConvertedSwap returns the amount of swap available as a total in bytes. This
|
// ConvertedSwap returns the amount of swap available as a total in bytes. This
|
||||||
@ -90,7 +90,7 @@ func (l Limits) ConvertedSwap() int64 {
|
|||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
return (l.Swap * 1_000_000) + l.BoundedMemoryLimit()
|
return (l.Swap * 1024 * 1024) + l.BoundedMemoryLimit()
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProcessLimit returns the process limit for a container. This is currently
|
// ProcessLimit returns the process limit for a container. This is currently
|
||||||
@ -105,7 +105,7 @@ func (l Limits) AsContainerResources() container.Resources {
|
|||||||
pids := l.ProcessLimit()
|
pids := l.ProcessLimit()
|
||||||
resources := container.Resources{
|
resources := container.Resources{
|
||||||
Memory: l.BoundedMemoryLimit(),
|
Memory: l.BoundedMemoryLimit(),
|
||||||
MemoryReservation: l.MemoryLimit * 1_000_000,
|
MemoryReservation: l.MemoryLimit * 1024 * 1024,
|
||||||
MemorySwap: l.ConvertedSwap(),
|
MemorySwap: l.ConvertedSwap(),
|
||||||
BlkioWeight: l.IoWeight,
|
BlkioWeight: l.IoWeight,
|
||||||
OomKillDisable: &l.OOMDisabled,
|
OomKillDisable: &l.OOMDisabled,
|
||||||
|
|||||||
@ -1,10 +1,10 @@
|
|||||||
package events
|
package events
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
)
|
)
|
||||||
|
|||||||
82
flake.lock
generated
Normal file
82
flake.lock
generated
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"flake-parts": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs-lib": "nixpkgs-lib"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1741352980,
|
||||||
|
"narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=",
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "flake-parts",
|
||||||
|
"rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "flake-parts",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1741379970,
|
||||||
|
"narHash": "sha256-Wh7esNh7G24qYleLvgOSY/7HlDUzWaL/n4qzlBePpiw=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "36fd87baa9083f34f7f5027900b62ee6d09b1f2f",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-unstable",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs-lib": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1740877520,
|
||||||
|
"narHash": "sha256-oiwv/ZK/2FhGxrCkQkB83i7GnWXPPLzoqFHpDD3uYpk=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "nixpkgs.lib",
|
||||||
|
"rev": "147dee35aab2193b174e4c0868bd80ead5ce755c",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "nixpkgs.lib",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"flake-parts": "flake-parts",
|
||||||
|
"nixpkgs": "nixpkgs",
|
||||||
|
"treefmt-nix": "treefmt-nix"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"treefmt-nix": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1739829690,
|
||||||
|
"narHash": "sha256-mL1szCeIsjh6Khn3nH2cYtwO5YXG6gBiTw1A30iGeDU=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "treefmt-nix",
|
||||||
|
"rev": "3d0579f5cc93436052d94b73925b48973a104204",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "treefmt-nix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
||||||
54
flake.nix
Normal file
54
flake.nix
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
{
|
||||||
|
description = "Wings";
|
||||||
|
|
||||||
|
inputs = {
|
||||||
|
flake-parts.url = "github:hercules-ci/flake-parts";
|
||||||
|
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||||
|
|
||||||
|
treefmt-nix = {
|
||||||
|
url = "github:numtide/treefmt-nix";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
outputs = {...} @ inputs:
|
||||||
|
inputs.flake-parts.lib.mkFlake {inherit inputs;} {
|
||||||
|
systems = inputs.nixpkgs.lib.systems.flakeExposed;
|
||||||
|
|
||||||
|
imports = [
|
||||||
|
inputs.treefmt-nix.flakeModule
|
||||||
|
];
|
||||||
|
|
||||||
|
perSystem = {system, ...}: let
|
||||||
|
pkgs = import inputs.nixpkgs {inherit system;};
|
||||||
|
in {
|
||||||
|
devShells.default = pkgs.mkShell {
|
||||||
|
buildInputs = with pkgs; [
|
||||||
|
go_1_24
|
||||||
|
gofumpt
|
||||||
|
golangci-lint
|
||||||
|
gotools
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
treefmt = {
|
||||||
|
projectRootFile = "flake.nix";
|
||||||
|
|
||||||
|
programs = {
|
||||||
|
alejandra.enable = true;
|
||||||
|
deadnix.enable = true;
|
||||||
|
gofumpt = {
|
||||||
|
enable = true;
|
||||||
|
extra = true;
|
||||||
|
};
|
||||||
|
shellcheck.enable = true;
|
||||||
|
shfmt = {
|
||||||
|
enable = true;
|
||||||
|
indent_size = 0; # 0 causes shfmt to use tabs
|
||||||
|
};
|
||||||
|
yamlfmt.enable = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
155
go.mod
155
go.mod
@ -1,123 +1,144 @@
|
|||||||
module github.com/pterodactyl/wings
|
module github.com/pterodactyl/wings
|
||||||
|
|
||||||
go 1.18
|
go 1.23.0
|
||||||
|
|
||||||
|
toolchain go1.24.1
|
||||||
|
|
||||||
require (
|
require (
|
||||||
emperror.dev/errors v0.8.1
|
emperror.dev/errors v0.8.1
|
||||||
github.com/AlecAivazis/survey/v2 v2.3.6
|
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||||
github.com/Jeffail/gabs/v2 v2.7.0
|
github.com/Jeffail/gabs/v2 v2.7.0
|
||||||
github.com/NYTimes/logrotate v1.0.0
|
github.com/NYTimes/logrotate v1.0.0
|
||||||
github.com/acobaugh/osrelease v0.1.0
|
github.com/acobaugh/osrelease v0.1.0
|
||||||
github.com/apex/log v1.9.0
|
github.com/apex/log v1.9.0
|
||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
|
||||||
github.com/beevik/etree v1.1.4
|
github.com/beevik/etree v1.5.0
|
||||||
github.com/buger/jsonparser v1.1.1
|
github.com/buger/jsonparser v1.1.1
|
||||||
github.com/cenkalti/backoff/v4 v4.2.1
|
github.com/cenkalti/backoff/v4 v4.3.0
|
||||||
github.com/creasty/defaults v1.7.0
|
github.com/creasty/defaults v1.8.0
|
||||||
github.com/docker/docker v23.0.6+incompatible
|
github.com/docker/docker v28.3.3+incompatible
|
||||||
github.com/docker/go-connections v0.4.0
|
github.com/docker/go-connections v0.5.0
|
||||||
github.com/fatih/color v1.15.0
|
github.com/fatih/color v1.18.0
|
||||||
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf
|
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf
|
||||||
github.com/gabriel-vasile/mimetype v1.4.2
|
github.com/gabriel-vasile/mimetype v1.4.8
|
||||||
github.com/gammazero/workerpool v1.1.3
|
github.com/gammazero/workerpool v1.1.3
|
||||||
github.com/gbrlsnchs/jwt/v3 v3.0.1
|
github.com/gbrlsnchs/jwt/v3 v3.0.1
|
||||||
github.com/gin-gonic/gin v1.9.0
|
github.com/gin-gonic/gin v1.10.1
|
||||||
github.com/glebarez/sqlite v1.8.0
|
github.com/glebarez/sqlite v1.11.0
|
||||||
github.com/go-co-op/gocron v1.25.0
|
github.com/go-co-op/gocron v1.37.0
|
||||||
github.com/goccy/go-json v0.10.2
|
github.com/google/uuid v1.6.0
|
||||||
github.com/google/uuid v1.3.0
|
github.com/gorilla/websocket v1.5.3
|
||||||
github.com/gorilla/websocket v1.5.0
|
github.com/iancoleman/strcase v0.3.0
|
||||||
github.com/iancoleman/strcase v0.2.0
|
|
||||||
github.com/icza/dyno v0.0.0-20230330125955-09f820a8d9c0
|
github.com/icza/dyno v0.0.0-20230330125955-09f820a8d9c0
|
||||||
github.com/juju/ratelimit v1.0.2
|
github.com/juju/ratelimit v1.0.2
|
||||||
github.com/karrick/godirwalk v1.17.0
|
github.com/klauspost/compress v1.18.0
|
||||||
github.com/klauspost/compress v1.16.5
|
|
||||||
github.com/klauspost/pgzip v1.2.6
|
github.com/klauspost/pgzip v1.2.6
|
||||||
github.com/magiconair/properties v1.8.7
|
github.com/magiconair/properties v1.8.9
|
||||||
github.com/mattn/go-colorable v0.1.13
|
github.com/mattn/go-colorable v0.1.14
|
||||||
github.com/mholt/archiver/v4 v4.0.0-alpha.8
|
github.com/mholt/archives v0.1.3
|
||||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
|
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||||
github.com/pkg/sftp v1.13.5
|
github.com/pkg/sftp v1.13.9
|
||||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
||||||
github.com/spf13/cobra v1.7.0
|
github.com/spf13/cobra v1.9.1
|
||||||
github.com/stretchr/testify v1.8.2
|
github.com/stretchr/testify v1.10.0
|
||||||
golang.org/x/crypto v0.8.0
|
golang.org/x/crypto v0.41.0
|
||||||
golang.org/x/sync v0.2.0
|
golang.org/x/sync v0.16.0
|
||||||
|
golang.org/x/sys v0.35.0
|
||||||
gopkg.in/ini.v1 v1.67.0
|
gopkg.in/ini.v1 v1.67.0
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
gorm.io/gorm v1.25.1
|
gorm.io/gorm v1.26.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||||
github.com/Microsoft/hcsshim v0.9.9 // indirect
|
github.com/Microsoft/hcsshim v0.12.9 // indirect
|
||||||
github.com/andybalholm/brotli v1.0.5 // indirect
|
github.com/STARRY-S/zip v0.2.2 // indirect
|
||||||
|
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect
|
||||||
github.com/bodgit/plumbing v1.3.0 // indirect
|
github.com/bodgit/plumbing v1.3.0 // indirect
|
||||||
github.com/bodgit/sevenzip v1.4.1 // indirect
|
github.com/bodgit/sevenzip v1.6.0 // indirect
|
||||||
github.com/bodgit/windows v1.0.1 // indirect
|
github.com/bodgit/windows v1.0.1 // indirect
|
||||||
github.com/bytedance/sonic v1.8.8 // indirect
|
github.com/bytedance/sonic v1.13.1 // indirect
|
||||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
github.com/bytedance/sonic/loader v0.2.4 // indirect
|
||||||
|
github.com/cloudwego/base64x v0.1.5 // indirect
|
||||||
|
github.com/containerd/errdefs v0.3.0 // indirect
|
||||||
|
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||||
|
github.com/containerd/log v0.1.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
github.com/distribution/reference v0.6.0 // indirect
|
||||||
github.com/docker/go-units v0.5.0 // indirect
|
github.com/docker/go-units v0.5.0 // indirect
|
||||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/gammazero/deque v0.2.1 // indirect
|
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
github.com/gammazero/deque v1.0.0 // indirect
|
||||||
github.com/glebarez/go-sqlite v1.21.1 // indirect
|
github.com/gin-contrib/sse v1.0.0 // indirect
|
||||||
|
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||||
|
github.com/go-logr/logr v1.4.2 // indirect
|
||||||
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/go-playground/locales v0.14.1 // indirect
|
github.com/go-playground/locales v0.14.1 // indirect
|
||||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||||
github.com/go-playground/validator/v10 v10.13.0 // indirect
|
github.com/go-playground/validator/v10 v10.25.0 // indirect
|
||||||
|
github.com/goccy/go-json v0.10.5 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/golang/snappy v0.0.4 // indirect
|
|
||||||
github.com/google/go-cmp v0.5.9 // indirect
|
|
||||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
|
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||||
github.com/jinzhu/now v1.1.5 // indirect
|
github.com/jinzhu/now v1.1.5 // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||||
github.com/kr/fs v0.1.0 // indirect
|
github.com/kr/fs v0.1.0 // indirect
|
||||||
github.com/leodido/go-urn v1.2.4 // indirect
|
github.com/leodido/go-urn v1.4.0 // indirect
|
||||||
github.com/magefile/mage v1.14.0 // indirect
|
github.com/magefile/mage v1.15.0 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.18 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||||
|
github.com/mikelolasagasti/xz v1.0.1 // indirect
|
||||||
|
github.com/minio/minlz v1.0.0 // indirect
|
||||||
|
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||||
|
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
||||||
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
|
github.com/morikuni/aec v1.0.0 // indirect
|
||||||
|
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||||
|
github.com/nwaples/rardecode/v2 v2.1.1 // indirect
|
||||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||||
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
|
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||||
github.com/pelletier/go-toml/v2 v2.0.7 // indirect
|
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||||
github.com/pierrec/lz4/v4 v4.1.17 // indirect
|
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/sorairolake/lzip-go v0.3.5 // indirect
|
||||||
github.com/therootcompany/xz v1.0.1 // indirect
|
github.com/spf13/pflag v1.0.6 // indirect
|
||||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||||
github.com/ulikunitz/xz v0.5.11 // indirect
|
github.com/ulikunitz/xz v0.5.14 // indirect
|
||||||
|
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
|
||||||
|
go.opentelemetry.io/otel v1.35.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/metric v1.35.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/trace v1.35.0 // indirect
|
||||||
|
go.uber.org/atomic v1.11.0 // indirect
|
||||||
go.uber.org/multierr v1.11.0 // indirect
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||||
golang.org/x/arch v0.3.0 // indirect
|
golang.org/x/arch v0.15.0 // indirect
|
||||||
golang.org/x/mod v0.10.0 // indirect
|
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
|
||||||
golang.org/x/net v0.10.0 // indirect
|
golang.org/x/net v0.42.0 // indirect
|
||||||
golang.org/x/sys v0.8.0 // indirect
|
golang.org/x/term v0.34.0 // indirect
|
||||||
golang.org/x/term v0.8.0 // indirect
|
golang.org/x/text v0.28.0 // indirect
|
||||||
golang.org/x/text v0.9.0 // indirect
|
|
||||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
|
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
|
||||||
golang.org/x/tools v0.8.0 // indirect
|
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
||||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
google.golang.org/protobuf v1.36.5 // indirect
|
||||||
google.golang.org/protobuf v1.30.0 // indirect
|
gotest.tools/v3 v3.0.2 // indirect
|
||||||
modernc.org/libc v1.22.5 // indirect
|
modernc.org/libc v1.61.13 // indirect
|
||||||
modernc.org/mathutil v1.5.0 // indirect
|
modernc.org/mathutil v1.7.1 // indirect
|
||||||
modernc.org/memory v1.5.0 // indirect
|
modernc.org/memory v1.8.2 // indirect
|
||||||
modernc.org/sqlite v1.22.1 // indirect
|
modernc.org/sqlite v1.36.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
@ -56,6 +56,7 @@ func (ac *activityCron) Run(ctx context.Context) error {
|
|||||||
activities = append(activities, v)
|
activities = append(activities, v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Delete any invalid activies
|
||||||
if len(ids) > 0 {
|
if len(ids) > 0 {
|
||||||
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
|
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
|
||||||
if tx.Error != nil {
|
if tx.Error != nil {
|
||||||
@ -71,16 +72,28 @@ func (ac *activityCron) Run(ctx context.Context) error {
|
|||||||
return errors.WrapIf(err, "cron: failed to send activity events to Panel")
|
return errors.WrapIf(err, "cron: failed to send activity events to Panel")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add all the successful activities to the list of IDs to delete.
|
|
||||||
ids = make([]int, len(activities))
|
ids = make([]int, len(activities))
|
||||||
for i, v := range activities {
|
for i, v := range activities {
|
||||||
ids[i] = v.ID
|
ids[i] = v.ID
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete all the activities that were sent to the Panel (or that were invalid).
|
// SQLite has a limitation of how many parameters we can specify in a single
|
||||||
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
|
// query, so we need to delete the activies in chunks of 32,000 instead of
|
||||||
if tx.Error != nil {
|
// all at once.
|
||||||
return errors.WithStack(tx.Error)
|
i := 0
|
||||||
|
idsLen := len(ids)
|
||||||
|
for i < idsLen {
|
||||||
|
start := i
|
||||||
|
end := min(i+32000, idsLen)
|
||||||
|
batchSize := end - start
|
||||||
|
|
||||||
|
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids[start:end]).Delete(&models.Activity{})
|
||||||
|
if tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
i += batchSize
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -5,6 +5,7 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/internal/database"
|
"github.com/pterodactyl/wings/internal/database"
|
||||||
"github.com/pterodactyl/wings/internal/models"
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
@ -83,9 +84,26 @@ func (sc *sftpCron) Run(ctx context.Context) error {
|
|||||||
if err := sc.manager.Client().SendActivityLogs(ctx, events.Elements()); err != nil {
|
if err := sc.manager.Client().SendActivityLogs(ctx, events.Elements()); err != nil {
|
||||||
return errors.Wrap(err, "failed to send sftp activity logs to Panel")
|
return errors.Wrap(err, "failed to send sftp activity logs to Panel")
|
||||||
}
|
}
|
||||||
if tx := database.Instance().Where("id IN ?", events.ids).Delete(&models.Activity{}); tx.Error != nil {
|
|
||||||
return errors.WithStack(tx.Error)
|
// SQLite has a limitation of how many parameters we can specify in a single
|
||||||
|
// query, so we need to delete the activies in chunks of 32,000 instead of
|
||||||
|
// all at once.
|
||||||
|
i := 0
|
||||||
|
idsLen := len(events.ids)
|
||||||
|
var tx *gorm.DB
|
||||||
|
for i < idsLen {
|
||||||
|
start := i
|
||||||
|
end := min(i+32000, idsLen)
|
||||||
|
batchSize := end - start
|
||||||
|
|
||||||
|
tx = database.Instance().WithContext(ctx).Where("id IN ?", events.ids[start:end]).Delete(&models.Activity{})
|
||||||
|
if tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
|
||||||
|
i += batchSize
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -2,9 +2,9 @@ package models
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"database/sql"
|
"database/sql"
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type JsonNullString struct {
|
type JsonNullString struct {
|
||||||
|
|||||||
21
internal/ufs/LICENSE
Normal file
21
internal/ufs/LICENSE
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
21
internal/ufs/README.md
Normal file
21
internal/ufs/README.md
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
# Filesystem
|
||||||
|
|
||||||
|
Coming Soon™
|
||||||
|
|
||||||
|
> TODO
|
||||||
|
|
||||||
|
## Licensing
|
||||||
|
|
||||||
|
Most code in this package is licensed under `MIT` with some exceptions.
|
||||||
|
|
||||||
|
The following files are licensed under `BSD-3-Clause` due to them being copied
|
||||||
|
verbatim or derived from [Go](https://go.dev)'s source code.
|
||||||
|
|
||||||
|
- [`file_posix.go`](./file_posix.go)
|
||||||
|
- [`mkdir_unix.go`](./mkdir_unix.go)
|
||||||
|
- [`path_unix.go`](./path_unix.go)
|
||||||
|
- [`removeall_unix.go`](./removeall_unix.go)
|
||||||
|
- [`stat_unix.go`](./stat_unix.go)
|
||||||
|
- [`walk.go`](./walk.go)
|
||||||
|
|
||||||
|
These changes are not associated with nor endorsed by The Go Authors.
|
||||||
12
internal/ufs/doc.go
Normal file
12
internal/ufs/doc.go
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
// Package ufs provides an abstraction layer for performing I/O on filesystems.
|
||||||
|
// This package is designed to be used in-place of standard `os` package I/O
|
||||||
|
// calls, and is not designed to be used as a generic filesystem abstraction
|
||||||
|
// like the `io/fs` package.
|
||||||
|
//
|
||||||
|
// The primary use-case of this package was to provide a "chroot-like" `os`
|
||||||
|
// wrapper, so we can safely sandbox I/O operations within a directory and
|
||||||
|
// use untrusted arbitrary paths.
|
||||||
|
package ufs
|
||||||
183
internal/ufs/error.go
Normal file
183
internal/ufs/error.go
Normal file
@ -0,0 +1,183 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
iofs "io/fs"
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrIsDirectory is an error for when an operation that operates only on
|
||||||
|
// files is given a path to a directory.
|
||||||
|
ErrIsDirectory = errors.New("is a directory")
|
||||||
|
// ErrNotDirectory is an error for when an operation that operates only on
|
||||||
|
// directories is given a path to a file.
|
||||||
|
ErrNotDirectory = errors.New("not a directory")
|
||||||
|
// ErrBadPathResolution is an error for when a sand-boxed filesystem
|
||||||
|
// resolves a given path to a forbidden location.
|
||||||
|
ErrBadPathResolution = errors.New("bad path resolution")
|
||||||
|
// ErrNotRegular is an error for when an operation that operates only on
|
||||||
|
// regular files is passed something other than a regular file.
|
||||||
|
ErrNotRegular = errors.New("not a regular file")
|
||||||
|
|
||||||
|
// ErrClosed is an error for when an entry was accessed after being closed.
|
||||||
|
ErrClosed = iofs.ErrClosed
|
||||||
|
// ErrInvalid is an error for when an invalid argument was used.
|
||||||
|
ErrInvalid = iofs.ErrInvalid
|
||||||
|
// ErrExist is an error for when an entry already exists.
|
||||||
|
ErrExist = iofs.ErrExist
|
||||||
|
// ErrNotExist is an error for when an entry does not exist.
|
||||||
|
ErrNotExist = iofs.ErrNotExist
|
||||||
|
// ErrPermission is an error for when the required permissions to perform an
|
||||||
|
// operation are missing.
|
||||||
|
ErrPermission = iofs.ErrPermission
|
||||||
|
)
|
||||||
|
|
||||||
|
// LinkError records an error during a link or symlink or rename
|
||||||
|
// system call and the paths that caused it.
|
||||||
|
type LinkError = os.LinkError
|
||||||
|
|
||||||
|
// PathError records an error and the operation and file path that caused it.
|
||||||
|
type PathError = iofs.PathError
|
||||||
|
|
||||||
|
// SyscallError records an error from a specific system call.
|
||||||
|
type SyscallError = os.SyscallError
|
||||||
|
|
||||||
|
// NewSyscallError returns, as an error, a new [*os.SyscallError] with the
|
||||||
|
// given system call name and error details. As a convenience, if err is nil,
|
||||||
|
// [NewSyscallError] returns nil.
|
||||||
|
func NewSyscallError(syscall string, err error) error {
|
||||||
|
return os.NewSyscallError(syscall, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertErrorType converts errors into our custom errors to ensure consistent
|
||||||
|
// error values.
|
||||||
|
func convertErrorType(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var pErr *PathError
|
||||||
|
if errors.As(err, &pErr) {
|
||||||
|
if errno, ok := pErr.Err.(syscall.Errno); ok {
|
||||||
|
return errnoToPathError(errno, pErr.Op, pErr.Path)
|
||||||
|
}
|
||||||
|
return pErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the error wasn't already a path error and is a errno, wrap it with
|
||||||
|
// details that we can use to know there is something wrong with our
|
||||||
|
// error wrapping somewhere.
|
||||||
|
var errno syscall.Errno
|
||||||
|
if errors.As(err, &errno) {
|
||||||
|
return &PathError{
|
||||||
|
Op: "!(UNKNOWN)",
|
||||||
|
Path: "!(UNKNOWN)",
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensurePathError ensures that err is a PathError. The op and path arguments
|
||||||
|
// are only used of the error isn't already a PathError.
|
||||||
|
func ensurePathError(err error, op, path string) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the error is already a PathError.
|
||||||
|
var pErr *PathError
|
||||||
|
if errors.As(err, &pErr) {
|
||||||
|
// If underlying error is a errno, convert it.
|
||||||
|
//
|
||||||
|
// DO NOT USE `errors.As` or whatever here, the error will either be
|
||||||
|
// an errno, or it will be wrapped already.
|
||||||
|
if errno, ok := pErr.Err.(syscall.Errno); ok {
|
||||||
|
return errnoToPathError(errno, pErr.Op, pErr.Path)
|
||||||
|
}
|
||||||
|
// Return the PathError as-is without modification.
|
||||||
|
return pErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the error is directly an errno, convert it to a PathError.
|
||||||
|
var errno syscall.Errno
|
||||||
|
if errors.As(err, &errno) {
|
||||||
|
return errnoToPathError(errno, op, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise just wrap it as a PathError without any additional changes.
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// errnoToPathError converts an errno into a proper path error.
|
||||||
|
func errnoToPathError(err syscall.Errno, op, path string) error {
|
||||||
|
switch err {
|
||||||
|
// File exists
|
||||||
|
case unix.EEXIST:
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: ErrExist,
|
||||||
|
}
|
||||||
|
// Is a directory
|
||||||
|
case unix.EISDIR:
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: ErrIsDirectory,
|
||||||
|
}
|
||||||
|
// Not a directory
|
||||||
|
case unix.ENOTDIR:
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: ErrNotDirectory,
|
||||||
|
}
|
||||||
|
// No such file or directory
|
||||||
|
case unix.ENOENT:
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: ErrNotExist,
|
||||||
|
}
|
||||||
|
// Operation not permitted
|
||||||
|
case unix.EPERM:
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: ErrPermission,
|
||||||
|
}
|
||||||
|
// Invalid cross-device link
|
||||||
|
case unix.EXDEV:
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
}
|
||||||
|
// Too many levels of symbolic links
|
||||||
|
case unix.ELOOP:
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return &PathError{
|
||||||
|
Op: op,
|
||||||
|
Path: path,
|
||||||
|
Err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
180
internal/ufs/file.go
Normal file
180
internal/ufs/file.go
Normal file
@ -0,0 +1,180 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
iofs "io/fs"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DirEntry is an entry read from a directory.
|
||||||
|
type DirEntry = iofs.DirEntry
|
||||||
|
|
||||||
|
// File describes readable and/or writable file from a Filesystem.
|
||||||
|
type File interface {
|
||||||
|
// Name returns the base name of the file.
|
||||||
|
Name() string
|
||||||
|
|
||||||
|
// Stat returns the FileInfo structure describing the file.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Stat() (FileInfo, error)
|
||||||
|
|
||||||
|
// ReadDir reads the contents of the directory associated with the file f
|
||||||
|
// and returns a slice of DirEntry values in directory order.
|
||||||
|
// Subsequent calls on the same file will yield later DirEntry records in the directory.
|
||||||
|
//
|
||||||
|
// If n > 0, ReadDir returns at most n DirEntry records.
|
||||||
|
// In this case, if ReadDir returns an empty slice, it will return an error explaining why.
|
||||||
|
// At the end of a directory, the error is io.EOF.
|
||||||
|
//
|
||||||
|
// If n <= 0, ReadDir returns all the DirEntry records remaining in the directory.
|
||||||
|
// When it succeeds, it returns a nil error (not io.EOF).
|
||||||
|
ReadDir(n int) ([]DirEntry, error)
|
||||||
|
|
||||||
|
// Readdirnames reads the contents of the directory associated with file
|
||||||
|
// and returns a slice of up to n names of files in the directory,
|
||||||
|
// in directory order. Subsequent calls on the same file will yield
|
||||||
|
// further names.
|
||||||
|
//
|
||||||
|
// If n > 0, Readdirnames returns at most n names. In this case, if
|
||||||
|
// Readdirnames returns an empty slice, it will return a non-nil error
|
||||||
|
// explaining why. At the end of a directory, the error is io.EOF.
|
||||||
|
//
|
||||||
|
// If n <= 0, Readdirnames returns all the names from the directory in
|
||||||
|
// a single slice. In this case, if Readdirnames succeeds (reads all
|
||||||
|
// the way to the end of the directory), it returns the slice and a
|
||||||
|
// nil error. If it encounters an error before the end of the
|
||||||
|
// directory, Readdirnames returns the names read until that point and
|
||||||
|
// a non-nil error.
|
||||||
|
Readdirnames(n int) (names []string, err error)
|
||||||
|
|
||||||
|
// Fd returns the integer Unix file descriptor referencing the open file.
|
||||||
|
// If f is closed, the file descriptor becomes invalid.
|
||||||
|
// If f is garbage collected, a finalizer may close the file descriptor,
|
||||||
|
// making it invalid; see runtime.SetFinalizer for more information on when
|
||||||
|
// a finalizer might be run. On Unix systems this will cause the SetDeadline
|
||||||
|
// methods to stop working.
|
||||||
|
// Because file descriptors can be reused, the returned file descriptor may
|
||||||
|
// only be closed through the Close method of f, or by its finalizer during
|
||||||
|
// garbage collection. Otherwise, during garbage collection the finalizer
|
||||||
|
// may close an unrelated file descriptor with the same (reused) number.
|
||||||
|
//
|
||||||
|
// As an alternative, see the f.SyscallConn method.
|
||||||
|
Fd() uintptr
|
||||||
|
|
||||||
|
// Truncate changes the size of the file.
|
||||||
|
// It does not change the I/O offset.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Truncate(size int64) error
|
||||||
|
|
||||||
|
io.Closer
|
||||||
|
|
||||||
|
io.Reader
|
||||||
|
io.ReaderAt
|
||||||
|
io.ReaderFrom
|
||||||
|
|
||||||
|
io.Writer
|
||||||
|
io.WriterAt
|
||||||
|
|
||||||
|
io.Seeker
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo describes a file and is returned by Stat and Lstat.
|
||||||
|
type FileInfo = iofs.FileInfo
|
||||||
|
|
||||||
|
// FileMode represents a file's mode and permission bits.
|
||||||
|
// The bits have the same definition on all systems, so that
|
||||||
|
// information about files can be moved from one system
|
||||||
|
// to another portably. Not all bits apply to all systems.
|
||||||
|
// The only required bit is ModeDir for directories.
|
||||||
|
type FileMode = iofs.FileMode
|
||||||
|
|
||||||
|
// The defined file mode bits are the most significant bits of the FileMode.
|
||||||
|
// The nine least-significant bits are the standard Unix rwxrwxrwx permissions.
|
||||||
|
// The values of these bits should be considered part of the public API and
|
||||||
|
// may be used in wire protocols or disk representations: they must not be
|
||||||
|
// changed, although new bits might be added.
|
||||||
|
const (
|
||||||
|
// ModeDir represents a directory.
|
||||||
|
// d: is a directory
|
||||||
|
ModeDir = iofs.ModeDir
|
||||||
|
// ModeAppend represents an append-only file.
|
||||||
|
// a: append-only
|
||||||
|
ModeAppend = iofs.ModeAppend
|
||||||
|
// ModeExclusive represents an exclusive file.
|
||||||
|
// l: exclusive use
|
||||||
|
ModeExclusive = iofs.ModeExclusive
|
||||||
|
// ModeTemporary .
|
||||||
|
// T: temporary file; Plan 9 only.
|
||||||
|
ModeTemporary = iofs.ModeTemporary
|
||||||
|
// ModeSymlink .
|
||||||
|
// L: symbolic link.
|
||||||
|
ModeSymlink = iofs.ModeSymlink
|
||||||
|
// ModeDevice .
|
||||||
|
// D: device file.
|
||||||
|
ModeDevice = iofs.ModeDevice
|
||||||
|
// ModeNamedPipe .
|
||||||
|
// p: named pipe (FIFO)
|
||||||
|
ModeNamedPipe = iofs.ModeNamedPipe
|
||||||
|
// ModeSocket .
|
||||||
|
// S: Unix domain socket.
|
||||||
|
ModeSocket = iofs.ModeSocket
|
||||||
|
// ModeSetuid .
|
||||||
|
// u: setuid
|
||||||
|
ModeSetuid = iofs.ModeSetuid
|
||||||
|
// ModeSetgid .
|
||||||
|
// g: setgid
|
||||||
|
ModeSetgid = iofs.ModeSetgid
|
||||||
|
// ModeCharDevice .
|
||||||
|
// c: Unix character device, when ModeDevice is set
|
||||||
|
ModeCharDevice = iofs.ModeCharDevice
|
||||||
|
// ModeSticky .
|
||||||
|
// t: sticky
|
||||||
|
ModeSticky = iofs.ModeSticky
|
||||||
|
// ModeIrregular .
|
||||||
|
// ?: non-regular file; nothing else is known about this file.
|
||||||
|
ModeIrregular = iofs.ModeIrregular
|
||||||
|
|
||||||
|
// ModeType .
|
||||||
|
ModeType = iofs.ModeType
|
||||||
|
|
||||||
|
// ModePerm .
|
||||||
|
// Unix permission bits, 0o777.
|
||||||
|
ModePerm = iofs.ModePerm
|
||||||
|
)
|
||||||
|
|
||||||
|
// Re-using the same names as Go's official `unix` and `os` package do.
|
||||||
|
const (
|
||||||
|
// O_RDONLY opens the file read-only.
|
||||||
|
O_RDONLY = unix.O_RDONLY
|
||||||
|
// O_WRONLY opens the file write-only.
|
||||||
|
O_WRONLY = unix.O_WRONLY
|
||||||
|
// O_RDWR opens the file read-write.
|
||||||
|
O_RDWR = unix.O_RDWR
|
||||||
|
// O_APPEND appends data to the file when writing.
|
||||||
|
O_APPEND = unix.O_APPEND
|
||||||
|
// O_CREATE creates a new file if it doesn't exist.
|
||||||
|
O_CREATE = unix.O_CREAT
|
||||||
|
// O_EXCL is used with O_CREATE, file must not exist.
|
||||||
|
O_EXCL = unix.O_EXCL
|
||||||
|
// O_SYNC open for synchronous I/O.
|
||||||
|
O_SYNC = unix.O_SYNC
|
||||||
|
// O_TRUNC truncates regular writable file when opened.
|
||||||
|
O_TRUNC = unix.O_TRUNC
|
||||||
|
// O_DIRECTORY opens a directory only. If the entry is not a directory an
|
||||||
|
// error will be returned.
|
||||||
|
O_DIRECTORY = unix.O_DIRECTORY
|
||||||
|
// O_NOFOLLOW opens the exact path given without following symlinks.
|
||||||
|
O_NOFOLLOW = unix.O_NOFOLLOW
|
||||||
|
O_CLOEXEC = unix.O_CLOEXEC
|
||||||
|
O_LARGEFILE = unix.O_LARGEFILE
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
AT_SYMLINK_NOFOLLOW = unix.AT_SYMLINK_NOFOLLOW
|
||||||
|
AT_REMOVEDIR = unix.AT_REMOVEDIR
|
||||||
|
AT_EMPTY_PATH = unix.AT_EMPTY_PATH
|
||||||
|
)
|
||||||
49
internal/ufs/file_posix.go
Normal file
49
internal/ufs/file_posix.go
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was copied from `go/src/os/file_posix.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix || (js && wasm) || wasip1 || windows
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ignoringEINTR makes a function call and repeats it if it returns an
|
||||||
|
// EINTR error. This appears to be required even though we install all
|
||||||
|
// signal handlers with SA_RESTART: see https://go.dev/issue/22838,
|
||||||
|
// https://go.dev/issue/38033, https://go.dev/issue/38836,
|
||||||
|
// https://go.dev/issue/40846. Also, https://go.dev/issue/20400 and
|
||||||
|
// https://go.dev/issue/36644 are issues in which a signal handler is
|
||||||
|
// installed without setting SA_RESTART. None of these are the common case,
|
||||||
|
// but there are enough of them that it seems that we can't avoid
|
||||||
|
// an EINTR loop.
|
||||||
|
func ignoringEINTR(fn func() error) error {
|
||||||
|
for {
|
||||||
|
err := fn()
|
||||||
|
if err != unix.EINTR {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
|
||||||
|
func syscallMode(i FileMode) (o FileMode) {
|
||||||
|
o |= i.Perm()
|
||||||
|
if i&ModeSetuid != 0 {
|
||||||
|
o |= unix.S_ISUID
|
||||||
|
}
|
||||||
|
if i&ModeSetgid != 0 {
|
||||||
|
o |= unix.S_ISGID
|
||||||
|
}
|
||||||
|
if i&ModeSticky != 0 {
|
||||||
|
o |= unix.S_ISVTX
|
||||||
|
}
|
||||||
|
// No mapping for Go's ModeTemporary (plan9 only).
|
||||||
|
return
|
||||||
|
}
|
||||||
168
internal/ufs/filesystem.go
Normal file
168
internal/ufs/filesystem.go
Normal file
@ -0,0 +1,168 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Filesystem represents a filesystem capable of performing I/O operations.
|
||||||
|
type Filesystem interface {
|
||||||
|
// Chmod changes the mode of the named file to mode.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, it changes the mode of the link's target.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
//
|
||||||
|
// A different subset of the mode bits are used, depending on the
|
||||||
|
// operating system.
|
||||||
|
//
|
||||||
|
// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
|
||||||
|
// ModeSticky are used.
|
||||||
|
//
|
||||||
|
// On Windows, only the 0200 bit (owner writable) of mode is used; it
|
||||||
|
// controls whether the file's read-only attribute is set or cleared.
|
||||||
|
// The other bits are currently unused. For compatibility with Go 1.12
|
||||||
|
// and earlier, use a non-zero mode. Use mode 0400 for a read-only
|
||||||
|
// file and 0600 for a readable+writable file.
|
||||||
|
//
|
||||||
|
// On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
|
||||||
|
// and ModeTemporary are used.
|
||||||
|
Chmod(name string, mode FileMode) error
|
||||||
|
|
||||||
|
// Chown changes the numeric uid and gid of the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, it changes the uid and gid of the link's target.
|
||||||
|
// A uid or gid of -1 means to not change that value.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
//
|
||||||
|
// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or
|
||||||
|
// EPLAN9 error, wrapped in *PathError.
|
||||||
|
Chown(name string, uid, gid int) error
|
||||||
|
|
||||||
|
// Lchown changes the numeric uid and gid of the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, it changes the uid and gid of the link itself.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
//
|
||||||
|
// On Windows, it always returns the syscall.EWINDOWS error, wrapped
|
||||||
|
// in *PathError.
|
||||||
|
Lchown(name string, uid, gid int) error
|
||||||
|
|
||||||
|
// Chtimes changes the access and modification times of the named
|
||||||
|
// file, similar to the Unix utime() or utimes() functions.
|
||||||
|
//
|
||||||
|
// The underlying filesystem may truncate or round the values to a
|
||||||
|
// less precise time unit.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Chtimes(name string, atime, mtime time.Time) error
|
||||||
|
|
||||||
|
// Create creates or truncates the named file. If the file already exists,
|
||||||
|
// it is truncated.
|
||||||
|
//
|
||||||
|
// If the file does not exist, it is created with mode 0666
|
||||||
|
// (before umask). If successful, methods on the returned File can
|
||||||
|
// be used for I/O; the associated file descriptor has mode O_RDWR.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Create(name string) (File, error)
|
||||||
|
|
||||||
|
// Mkdir creates a new directory with the specified name and permission
|
||||||
|
// bits (before umask).
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Mkdir(name string, perm FileMode) error
|
||||||
|
|
||||||
|
// MkdirAll creates a directory named path, along with any necessary
|
||||||
|
// parents, and returns nil, or else returns an error.
|
||||||
|
//
|
||||||
|
// The permission bits perm (before umask) are used for all
|
||||||
|
// directories that MkdirAll creates.
|
||||||
|
// If path is already a directory, MkdirAll does nothing
|
||||||
|
// and returns nil.
|
||||||
|
MkdirAll(path string, perm FileMode) error
|
||||||
|
|
||||||
|
// Open opens the named file for reading.
|
||||||
|
//
|
||||||
|
// If successful, methods on the returned file can be used for reading; the
|
||||||
|
// associated file descriptor has mode O_RDONLY.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Open(name string) (File, error)
|
||||||
|
|
||||||
|
// OpenFile is the generalized open call; most users will use Open
|
||||||
|
// or Create instead. It opens the named file with specified flag
|
||||||
|
// (O_RDONLY etc.).
|
||||||
|
//
|
||||||
|
// If the file does not exist, and the O_CREATE flag
|
||||||
|
// is passed, it is created with mode perm (before umask). If successful,
|
||||||
|
// methods on the returned File can be used for I/O.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
OpenFile(name string, flag int, perm FileMode) (File, error)
|
||||||
|
|
||||||
|
// ReadDir reads the named directory,
|
||||||
|
//
|
||||||
|
// returning all its directory entries sorted by filename.
|
||||||
|
// If an error occurs reading the directory, ReadDir returns the entries it
|
||||||
|
// was able to read before the error, along with the error.
|
||||||
|
ReadDir(name string) ([]DirEntry, error)
|
||||||
|
|
||||||
|
// Remove removes the named file or (empty) directory.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Remove(name string) error
|
||||||
|
|
||||||
|
// RemoveAll removes path and any children it contains.
|
||||||
|
//
|
||||||
|
// It removes everything it can but returns the first error
|
||||||
|
// it encounters. If the path does not exist, RemoveAll
|
||||||
|
// returns nil (no error).
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
RemoveAll(path string) error
|
||||||
|
|
||||||
|
// Rename renames (moves) oldpath to newpath.
|
||||||
|
//
|
||||||
|
// If newpath already exists and is not a directory, Rename replaces it.
|
||||||
|
// OS-specific restrictions may apply when oldpath and newpath are in different directories.
|
||||||
|
// Even within the same directory, on non-Unix platforms Rename is not an atomic operation.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *LinkError.
|
||||||
|
Rename(oldname, newname string) error
|
||||||
|
|
||||||
|
// Stat returns a FileInfo describing the named file.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Stat(name string) (FileInfo, error)
|
||||||
|
|
||||||
|
// Lstat returns a FileInfo describing the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, the returned FileInfo
|
||||||
|
// describes the symbolic link. Lstat makes no attempt to follow the link.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Lstat(name string) (FileInfo, error)
|
||||||
|
|
||||||
|
// Symlink creates newname as a symbolic link to oldname.
|
||||||
|
//
|
||||||
|
// On Windows, a symlink to a non-existent oldname creates a file symlink;
|
||||||
|
// if oldname is later created as a directory the symlink will not work.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *LinkError.
|
||||||
|
Symlink(oldname, newname string) error
|
||||||
|
|
||||||
|
// WalkDir walks the file tree rooted at root, calling fn for each file or
|
||||||
|
// directory in the tree, including root.
|
||||||
|
//
|
||||||
|
// All errors that arise visiting files and directories are filtered by fn:
|
||||||
|
// see the [WalkDirFunc] documentation for details.
|
||||||
|
//
|
||||||
|
// The files are walked in lexical order, which makes the output deterministic
|
||||||
|
// but requires WalkDir to read an entire directory into memory before proceeding
|
||||||
|
// to walk that directory.
|
||||||
|
//
|
||||||
|
// WalkDir does not follow symbolic links found in directories,
|
||||||
|
// but if root itself is a symbolic link, its target will be walked.
|
||||||
|
WalkDir(root string, fn WalkDirFunc) error
|
||||||
|
}
|
||||||
169
internal/ufs/fs_quota.go
Normal file
169
internal/ufs/fs_quota.go
Normal file
@ -0,0 +1,169 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Quota is a wrapper around [*UnixFS] that provides the ability to limit the
// disk usage of the filesystem.
//
// NOTE: this is not a full complete quota filesystem, it provides utilities for
// tracking and checking the usage of the filesystem. The only operation that is
// automatically accounted against the quota are file deletions.
type Quota struct {
	// fs is the underlying filesystem that runs the actual I/O operations.
	*UnixFS

	// limit is the size limit of the filesystem, in bytes.
	//
	// limit is atomic to allow the limit to be safely changed after the
	// filesystem was created.
	//
	// A limit of `-1` disables any write operation from being performed.
	// A limit of `0` disables any limit checking.
	limit atomic.Int64

	// usage is the current usage of the filesystem, in bytes.
	//
	// If usage is set to `-1`, it hasn't been calculated yet; CanFit treats
	// an uncalculated usage as "allow".
	usage atomic.Int64
}
|
||||||
|
|
||||||
|
// NewQuota creates a new Quota filesystem using an existing UnixFS and a limit.
|
||||||
|
func NewQuota(fs *UnixFS, limit int64) *Quota {
|
||||||
|
qfs := Quota{UnixFS: fs}
|
||||||
|
qfs.limit.Store(limit)
|
||||||
|
return &qfs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the filesystem.
|
||||||
|
func (fs *Quota) Close() (err error) {
|
||||||
|
err = fs.UnixFS.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit returns the current size limit of the filesystem in bytes.
// See the `limit` field for the meaning of the sentinel values -1 and 0.
func (fs *Quota) Limit() int64 {
	return fs.limit.Load()
}
|
||||||
|
|
||||||
|
// SetLimit sets the size limit of the filesystem and returns the previous
// limit. (The original comment claimed it "returns the limit"; the code
// swaps in a new one.)
func (fs *Quota) SetLimit(newLimit int64) int64 {
	return fs.limit.Swap(newLimit)
}
|
||||||
|
|
||||||
|
// Usage returns the current tracked usage of the filesystem in bytes.
// A value of -1 means the usage has not been calculated yet.
func (fs *Quota) Usage() int64 {
	return fs.usage.Load()
}
|
||||||
|
|
||||||
|
// SetUsage replaces the total tracked usage of the filesystem and returns
// the previous value.
func (fs *Quota) SetUsage(newUsage int64) int64 {
	return fs.usage.Swap(newUsage)
}
|
||||||
|
|
||||||
|
// Add adds `i` to the tracked usage total.
|
||||||
|
func (fs *Quota) Add(i int64) int64 {
|
||||||
|
usage := fs.Usage()
|
||||||
|
|
||||||
|
// If adding `i` to the usage will put us below 0, cap it. (`i` can be negative)
|
||||||
|
if usage+i < 0 {
|
||||||
|
fs.usage.Store(0)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return fs.usage.Add(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CanFit checks if the given size can fit in the filesystem without exceeding
|
||||||
|
// the limit of the filesystem.
|
||||||
|
func (fs *Quota) CanFit(size int64) bool {
|
||||||
|
// Get the size limit of the filesystem.
|
||||||
|
limit := fs.Limit()
|
||||||
|
switch limit {
|
||||||
|
case -1:
|
||||||
|
// A limit of -1 means no write operations are allowed.
|
||||||
|
return false
|
||||||
|
case 0:
|
||||||
|
// A limit of 0 means unlimited.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Any other limit is a value we need to check.
|
||||||
|
usage := fs.Usage()
|
||||||
|
if usage == -1 {
|
||||||
|
// We don't know what the current usage is yet.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the current usage + the requested size are under the limit of the
|
||||||
|
// filesystem, allow it.
|
||||||
|
if usage+size <= limit {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Welp, the size would exceed the limit of the filesystem, deny it.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes the named file or (empty) directory.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type [*PathError].
|
||||||
|
func (fs *Quota) Remove(name string) error {
|
||||||
|
// For information on why this interface is used here, check its
|
||||||
|
// documentation.
|
||||||
|
s, err := fs.RemoveStat(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't reduce the quota's usage as `name` is not a regular file.
|
||||||
|
if !s.Mode().IsRegular() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the size of the deleted file from the quota usage.
|
||||||
|
fs.Add(-s.Size())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveAll removes path and any children it contains.
|
||||||
|
//
|
||||||
|
// It removes everything it can but returns the first error
|
||||||
|
// it encounters. If the path does not exist, RemoveAll
|
||||||
|
// returns nil (no error).
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type [*PathError].
|
||||||
|
func (fs *Quota) RemoveAll(name string) error {
|
||||||
|
name, err := fs.unsafePath(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// While removeAll internally checks this, I want to make sure we check it
|
||||||
|
// and return the proper error so our tests can ensure that this will never
|
||||||
|
// be a possibility.
|
||||||
|
if name == "." {
|
||||||
|
return &PathError{
|
||||||
|
Op: "removeall",
|
||||||
|
Path: name,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fs.removeAll(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeAll delegates to the package-level removeAll helper, passing the
// Quota receiver so that per-entry deletions presumably go through
// Quota.unlinkat and keep the usage counter in sync — confirm against the
// helper's implementation.
func (fs *Quota) removeAll(path string) error {
	return removeAll(fs, path)
}
|
||||||
|
|
||||||
|
func (fs *Quota) unlinkat(dirfd int, name string, flags int) error {
|
||||||
|
if flags == 0 {
|
||||||
|
s, err := fs.Lstatat(dirfd, name)
|
||||||
|
if err == nil && s.Mode().IsRegular() {
|
||||||
|
fs.Add(-s.Size())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fs.UnixFS.unlinkat(dirfd, name, flags)
|
||||||
|
}
|
||||||
895
internal/ufs/fs_unix.go
Normal file
895
internal/ufs/fs_unix.go
Normal file
@ -0,0 +1,895 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnixFS is a filesystem that uses the unix package to make io calls.
//
// This is used for proper sand-boxing and full control over the exact syscalls
// being performed.
type UnixFS struct {
	// basePath is the base path for file operations to take place in.
	// Stored without a trailing slash (trimmed in NewUnixFS).
	basePath string

	// useOpenat2 controls whether the `openat2` syscall is used instead of the
	// older `openat` syscall.
	useOpenat2 bool
}
|
||||||
|
|
||||||
|
// NewUnixFS creates a new sandboxed unix filesystem. BasePath is used as the
|
||||||
|
// sandbox path, operations on BasePath itself are not allowed, but any
|
||||||
|
// operations on its descendants are. Symlinks pointing outside BasePath are
|
||||||
|
// checked and prevented from enabling an escape in a non-raceable manor.
|
||||||
|
func NewUnixFS(basePath string, useOpenat2 bool) (*UnixFS, error) {
|
||||||
|
basePath = strings.TrimSuffix(basePath, "/")
|
||||||
|
fs := &UnixFS{
|
||||||
|
basePath: basePath,
|
||||||
|
useOpenat2: useOpenat2,
|
||||||
|
}
|
||||||
|
return fs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BasePath returns the base path of the UnixFS sandbox, file operations
|
||||||
|
// pointing outside this path are prohibited and will be blocked by all
|
||||||
|
// operations implemented by UnixFS.
|
||||||
|
func (fs *UnixFS) BasePath() string {
|
||||||
|
return fs.basePath
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the filesystem.
//
// UnixFS resolves and closes directory file descriptors per operation rather
// than holding any long-lived ones, so this is a no-op kept to satisfy the
// filesystem interface; it always returns nil. (The previous comment claimed
// a sandbox fd was released here — nothing is.)
func (fs *UnixFS) Close() error {
	return nil
}
|
||||||
|
|
||||||
|
// Chmod changes the mode of the named file to mode.
//
// If the file is a symbolic link, it changes the mode of the link's target.
// If there is an error, it will be of type *PathError.
//
// A different subset of the mode bits are used, depending on the
// operating system.
//
// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
// ModeSticky are used.
//
// On Windows, only the 0200 bit (owner writable) of mode is used; it
// controls whether the file's read-only attribute is set or cleared.
// The other bits are currently unused. For compatibility with Go 1.12
// and earlier, use a non-zero mode. Use mode 0400 for a read-only
// file and 0600 for a readable+writable file.
//
// On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
// and ModeTemporary are used.
func (fs *UnixFS) Chmod(name string, mode FileMode) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// NOTE: closeFd is deferred before the error check on purpose — this
	// pattern is used consistently, which suggests safePath always returns a
	// callable closeFd even on failure; confirm against safePath.
	defer closeFd()
	if err != nil {
		return err
	}
	return fs.fchmodat("chmod", dirfd, name, mode)
}
|
||||||
|
|
||||||
|
// Chmodat is like Chmod but it takes a dirfd and name instead of a full path,
// skipping the sandbox path resolution. Errors are reported with op "chmodat".
func (fs *UnixFS) Chmodat(dirfd int, name string, mode FileMode) error {
	return fs.fchmodat("chmodat", dirfd, name, mode)
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) fchmodat(op string, dirfd int, name string, mode FileMode) error {
|
||||||
|
return ensurePathError(unix.Fchmodat(dirfd, name, uint32(mode), 0), op, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chown changes the numeric uid and gid of the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, it changes the uid and gid of the link's target.
|
||||||
|
// A uid or gid of -1 means to not change that value.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
//
|
||||||
|
// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or
|
||||||
|
// EPLAN9 error, wrapped in *PathError.
|
||||||
|
func (fs *UnixFS) Chown(name string, uid, gid int) error {
|
||||||
|
return ensurePathError(fs.fchown(name, uid, gid, 0), "chown", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lchown changes the numeric uid and gid of the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, it changes the uid and gid of the link itself.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
//
|
||||||
|
// On Windows, it always returns the syscall.EWINDOWS error, wrapped
|
||||||
|
// in *PathError.
|
||||||
|
func (fs *UnixFS) Lchown(name string, uid, gid int) error {
|
||||||
|
// With AT_SYMLINK_NOFOLLOW, Fchownat acts like Lchown but allows us to
|
||||||
|
// pass a dirfd.
|
||||||
|
return ensurePathError(fs.fchown(name, uid, gid, AT_SYMLINK_NOFOLLOW), "lchown", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// fchown is a re-usable Fchownat syscall used by Chown and Lchown.
//
// The raw unix error is returned unwrapped; both callers wrap it with
// ensurePathError and their own op name.
func (fs *UnixFS) fchown(name string, uid, gid, flags int) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// Deferred before the error check deliberately, matching every other
	// safePath call site in this file.
	defer closeFd()
	if err != nil {
		return err
	}
	return unix.Fchownat(dirfd, name, uid, gid, flags)
}
|
||||||
|
|
||||||
|
// Chownat is like Chown but allows passing an existing directory file
|
||||||
|
// descriptor rather than needing to resolve one.
|
||||||
|
func (fs *UnixFS) Chownat(dirfd int, name string, uid, gid int) error {
|
||||||
|
return ensurePathError(unix.Fchownat(dirfd, name, uid, gid, 0), "chownat", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lchownat is like Lchown but allows passing an existing directory file
|
||||||
|
// descriptor rather than needing to resolve one.
|
||||||
|
func (fs *UnixFS) Lchownat(dirfd int, name string, uid, gid int) error {
|
||||||
|
return ensurePathError(unix.Fchownat(dirfd, name, uid, gid, AT_SYMLINK_NOFOLLOW), "lchownat", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chtimes changes the access and modification times of the named
// file, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Chtimes(name string, atime, mtime time.Time) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// Deferred before the error check deliberately, matching every other
	// safePath call site in this file.
	defer closeFd()
	if err != nil {
		return err
	}
	return fs.Chtimesat(dirfd, name, atime, mtime)
}
|
||||||
|
|
||||||
|
// Chtimesat is like Chtimes but allows passing an existing directory file
|
||||||
|
// descriptor rather than needing to resolve one.
|
||||||
|
func (fs *UnixFS) Chtimesat(dirfd int, name string, atime, mtime time.Time) error {
|
||||||
|
var utimes [2]unix.Timespec
|
||||||
|
set := func(i int, t time.Time) {
|
||||||
|
if t.IsZero() {
|
||||||
|
utimes[i] = unix.Timespec{Sec: unix.UTIME_OMIT, Nsec: unix.UTIME_OMIT}
|
||||||
|
} else {
|
||||||
|
utimes[i] = unix.NsecToTimespec(t.UnixNano())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
set(0, atime)
|
||||||
|
set(1, mtime)
|
||||||
|
|
||||||
|
// This does support `AT_SYMLINK_NOFOLLOW` as well if needed.
|
||||||
|
return ensurePathError(unix.UtimesNanoAt(dirfd, name, utimes[0:], 0), "chtimes", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create creates or truncates the named file. If the file already exists,
// it is truncated.
//
// If the file does not exist, it is created with mode 0644
// (before umask). If successful, methods on the returned File can
// be used for I/O; the associated file descriptor has mode O_WRONLY.
// If there is an error, it will be of type *PathError.
//
// NOTE: unlike os.Create, this uses mode 0644 and O_WRONLY rather than
// 0666 and O_RDWR (the previous comment, copied from os.Create, said
// otherwise).
func (fs *UnixFS) Create(name string) (File, error) {
	return fs.OpenFile(name, O_CREATE|O_WRONLY|O_TRUNC, 0o644)
}
|
||||||
|
|
||||||
|
// Mkdir creates a new directory with the specified name and permission
// bits (before umask).
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Mkdir(name string, mode FileMode) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// Deferred before the error check deliberately, matching every other
	// safePath call site in this file.
	defer closeFd()
	if err != nil {
		return err
	}
	return fs.mkdirat("mkdir", dirfd, name, mode)
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) Mkdirat(dirfd int, name string, mode FileMode) error {
|
||||||
|
return fs.mkdirat("mkdirat", dirfd, name, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) mkdirat(op string, dirfd int, name string, mode FileMode) error {
|
||||||
|
return ensurePathError(unix.Mkdirat(dirfd, name, uint32(mode)), op, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MkdirAll creates a directory named path, along with any necessary
|
||||||
|
// parents, and returns nil, or else returns an error.
|
||||||
|
//
|
||||||
|
// The permission bits perm (before umask) are used for all
|
||||||
|
// directories that MkdirAll creates.
|
||||||
|
// If path is already a directory, MkdirAll does nothing
|
||||||
|
// and returns nil.
|
||||||
|
func (fs *UnixFS) MkdirAll(name string, mode FileMode) error {
|
||||||
|
// Ensure name is somewhat clean before continuing.
|
||||||
|
name, err := fs.unsafePath(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return fs.mkdirAll(name, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens the named file for reading.
//
// If successful, methods on the returned file can be used for reading; the
// associated file descriptor has mode O_RDONLY.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Open(name string) (File, error) {
	return fs.OpenFile(name, O_RDONLY, 0)
}
|
||||||
|
|
||||||
|
// OpenFile is the generalized open call; most users will use Open
|
||||||
|
// or Create instead. It opens the named file with specified flag
|
||||||
|
// (O_RDONLY etc.).
|
||||||
|
//
|
||||||
|
// If the file does not exist, and the O_CREATE flag
|
||||||
|
// is passed, it is created with mode perm (before umask). If successful,
|
||||||
|
// methods on the returned File can be used for I/O.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func (fs *UnixFS) OpenFile(name string, flag int, mode FileMode) (File, error) {
|
||||||
|
fd, err := fs.openFile(name, flag, mode)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Do not close `fd` here, it is passed to a file that needs the fd, the
|
||||||
|
// caller of this function is responsible for calling Close() on the File
|
||||||
|
// to release the file descriptor.
|
||||||
|
return os.NewFile(uintptr(fd), name), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// openFile resolves name inside the sandbox, then opens it relative to the
// resolved directory fd, returning the raw file descriptor. The caller owns
// the returned fd.
func (fs *UnixFS) openFile(name string, flag int, mode FileMode) (int, error) {
	dirfd, name, closeFd, err := fs.safePath(name)
	// Deferred before the error check deliberately, matching every other
	// safePath call site in this file.
	defer closeFd()
	if err != nil {
		return 0, err
	}
	return fs.openat(dirfd, name, flag, mode)
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) OpenFileat(dirfd int, name string, flag int, mode FileMode) (File, error) {
|
||||||
|
fd, err := fs.openat(dirfd, name, flag, mode)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Do not close `fd` here, it is passed to a file that needs the fd, the
|
||||||
|
// caller of this function is responsible for calling Close() on the File
|
||||||
|
// to release the file descriptor.
|
||||||
|
return os.NewFile(uintptr(fd), name), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDir reads the named directory,
//
// returning all its directory entries sorted by filename.
// If an error occurs reading the directory, ReadDir returns the entries it
// was able to read before the error, along with the error.
func (fs *UnixFS) ReadDir(path string) ([]DirEntry, error) {
	dirfd, name, closeFd, err := fs.safePath(path)
	// Deferred before the error check deliberately, matching every other
	// safePath call site in this file.
	defer closeFd()
	if err != nil {
		return nil, err
	}
	// O_DIRECTORY makes opening a non-directory fail here (ENOTDIR) instead
	// of failing later during the read.
	fd, err := fs.openat(dirfd, name, O_DIRECTORY|O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = unix.Close(fd)
	}()
	return fs.readDir(fd, name, ".", nil)
}
|
||||||
|
|
||||||
|
// RemoveStat is a combination of Stat and Remove, it is used to more
|
||||||
|
// efficiently remove a file when the caller needs to stat it before
|
||||||
|
// removing it.
|
||||||
|
//
|
||||||
|
// This optimized function exists for our QuotaFS abstraction, which needs
|
||||||
|
// to track writes to a filesystem. When removing a file, the QuotaFS needs
|
||||||
|
// to know if the entry is a file and if so, how large it is. Because we
|
||||||
|
// need to Stat a file in order to get its mode and size, we will already
|
||||||
|
// know if the entry needs to be removed by using Unlink or Rmdir. The
|
||||||
|
// standard `Remove` method just tries both Unlink and Rmdir (in that order)
|
||||||
|
// as it ends up usually being faster and more efficient than calling Stat +
|
||||||
|
// the proper operation in the first place.
|
||||||
|
func (fs *UnixFS) RemoveStat(name string) (FileInfo, error) {
|
||||||
|
dirfd, name, closeFd, err := fs.safePath(name)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lstat name, we use Lstat as Unlink doesn't care about symlinks.
|
||||||
|
s, err := fs.Lstatat(dirfd, name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.IsDir() {
|
||||||
|
err = fs.unlinkat(dirfd, name, AT_REMOVEDIR) // Rmdir
|
||||||
|
} else {
|
||||||
|
err = fs.unlinkat(dirfd, name, 0)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return s, ensurePathError(err, "rename", name)
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes the named file or (empty) directory.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Remove(name string) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// Deferred before the error check deliberately, matching every other
	// safePath call site in this file.
	defer closeFd()
	if err != nil {
		return err
	}

	// Prevent trying to Remove the base directory.
	if name == "." {
		return &PathError{
			Op:   "remove",
			Path: name,
			Err:  ErrBadPathResolution,
		}
	}

	// System call interface forces us to know
	// whether name is a file or directory.
	// Try both: it is cheaper on average than
	// doing a Stat plus the right one.
	err = fs.unlinkat(dirfd, name, 0)
	if err == nil {
		return nil
	}
	err1 := fs.unlinkat(dirfd, name, AT_REMOVEDIR) // Rmdir
	if err1 == nil {
		return nil
	}

	// Both failed: figure out which error to return.
	// OS X and Linux differ on whether unlink(dir)
	// returns EISDIR, so can't use that. However,
	// both agree that rmdir(file) returns ENOTDIR,
	// so we can use that to decide which error is real.
	// Rmdir might also return ENOTDIR if given a bad
	// file path, like /etc/passwd/foo, but in that case,
	// both errors will be ENOTDIR, so it's okay to
	// use the error from unlink.
	//
	// NOTE(review): direct comparison to unix.ENOTDIR relies on unlinkat
	// returning the raw unix.Errno (ignoringEINTR does not wrap it).
	if err1 != unix.ENOTDIR {
		err = err1
	}
	return ensurePathError(err, "remove", name)
}
|
||||||
|
|
||||||
|
// RemoveAll removes path and any children it contains.
|
||||||
|
//
|
||||||
|
// It removes everything it can but returns the first error
|
||||||
|
// it encounters. If the path does not exist, RemoveAll
|
||||||
|
// returns nil (no error).
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func (fs *UnixFS) RemoveAll(name string) error {
|
||||||
|
name, err := fs.unsafePath(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// While removeAll internally checks this, I want to make sure we check it
|
||||||
|
// and return the proper error so our tests can ensure that this will never
|
||||||
|
// be a possibility.
|
||||||
|
if name == "." {
|
||||||
|
return &PathError{
|
||||||
|
Op: "removeall",
|
||||||
|
Path: name,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.removeAll(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveContents recursively removes the contents of name.
|
||||||
|
//
|
||||||
|
// It removes everything it can but returns the first error
|
||||||
|
// it encounters. If the path does not exist, RemoveContents
|
||||||
|
// returns nil (no error).
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type [*PathError].
|
||||||
|
func (fs *UnixFS) RemoveContents(name string) error {
|
||||||
|
name, err := fs.unsafePath(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlike RemoveAll, we don't remove `name` itself, only it's contents.
|
||||||
|
// So there is no need to check for a name of `.` here.
|
||||||
|
|
||||||
|
return fs.removeContents(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) unlinkat(dirfd int, name string, flags int) error {
|
||||||
|
return ignoringEINTR(func() error {
|
||||||
|
return unix.Unlinkat(dirfd, name, flags)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rename renames (moves) oldpath to newpath.
//
// If newpath already exists and is not a directory, Rename replaces it.
// OS-specific restrictions may apply when oldpath and newpath are in different directories.
// Even within the same directory, on non-Unix platforms Rename is not an atomic operation.
//
// If there is an error, it will be of type *LinkError.
func (fs *UnixFS) Rename(oldpath, newpath string) error {
	// Simple case: both paths are the same.
	if oldpath == newpath {
		return nil
	}

	olddirfd, oldname, closeFd, err := fs.safePath(oldpath)
	// Deferred before the error check deliberately, matching every other
	// safePath call site in this file.
	defer closeFd()
	if err != nil {
		return err
	}
	// Ensure that we are not trying to rename the base directory itself.
	// While unix.Renameat ends up throwing a "device or resource busy" error,
	// that doesn't mean we are protecting the system properly.
	if oldname == "." {
		return &PathError{
			Op:   "rename",
			Path: oldname,
			Err:  ErrBadPathResolution,
		}
	}
	// Stat the old target to return proper errors.
	if _, err := fs.Lstatat(olddirfd, oldname); err != nil {
		return err
	}

	newdirfd, newname, closeFd2, err := fs.safePath(newpath)
	if err != nil {
		// closeFd2 is called eagerly on this branch (no defer yet) because
		// safePath may be retried below, which would rebind closeFd2.
		closeFd2()
		if !errors.Is(err, ErrNotExist) {
			return err
		}
		// The destination's parent directory does not exist: extract the
		// missing path from the *PathError, create it, and resolve again.
		var pathErr *PathError
		if !errors.As(err, &pathErr) {
			return err
		}
		if err := fs.MkdirAll(pathErr.Path, 0o755); err != nil {
			return err
		}
		newdirfd, newname, closeFd2, err = fs.safePath(newpath)
		defer closeFd2()
		if err != nil {
			return err
		}
	} else {
		defer closeFd2()
	}

	// Ensure that we are not trying to rename the base directory itself.
	// While unix.Renameat ends up throwing a "device or resource busy" error,
	// that doesn't mean we are protecting the system properly.
	if newname == "." {
		return &PathError{
			Op:   "rename",
			Path: newname,
			Err:  ErrBadPathResolution,
		}
	}
	// Stat the new target to return proper errors.
	//
	// NOTE(review): this makes Rename refuse to overwrite an existing target
	// (unlike os.Rename); there is also a small window between this check and
	// the Renameat below — confirm callers accept both behaviors.
	_, err = fs.Lstatat(newdirfd, newname)
	switch {
	case err == nil:
		return &PathError{
			Op:   "rename",
			Path: newname,
			Err:  ErrExist,
		}
	case !errors.Is(err, ErrNotExist):
		return err
	}
	if err := unix.Renameat(olddirfd, oldname, newdirfd, newname); err != nil {
		return &LinkError{Op: "rename", Old: oldpath, New: newpath, Err: err}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Stat returns a FileInfo describing the named file, following symlinks.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Stat(name string) (FileInfo, error) {
	return fs._fstat("stat", name, 0)
}
|
||||||
|
|
||||||
|
// Statat is like Stat but allows passing an existing directory file
// descriptor rather than needing to resolve one.
func (fs *UnixFS) Statat(dirfd int, name string) (FileInfo, error) {
	// flags == 0 means symlinks are followed (contrast with Lstatat).
	return fs._fstatat("statat", dirfd, name, 0)
}
|
||||||
|
|
||||||
|
// Lstat returns a FileInfo describing the named file.
//
// If the file is a symbolic link, the returned FileInfo
// describes the symbolic link. Lstat makes no attempt to follow the link.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Lstat(name string) (FileInfo, error) {
	// AT_SYMLINK_NOFOLLOW makes fstatat stat the link itself.
	return fs._fstat("lstat", name, AT_SYMLINK_NOFOLLOW)
}
|
||||||
|
|
||||||
|
// Lstatat is like Lstat but allows passing an existing directory file
// descriptor rather than needing to resolve one.
func (fs *UnixFS) Lstatat(dirfd int, name string) (FileInfo, error) {
	// AT_SYMLINK_NOFOLLOW makes fstatat stat the link itself.
	return fs._fstatat("lstatat", dirfd, name, AT_SYMLINK_NOFOLLOW)
}
|
||||||
|
|
||||||
|
// fstat stats name (resolved safely relative to the base path) with the
// given flags, reporting errors under the generic "fstat" operation name.
func (fs *UnixFS) fstat(name string, flags int) (FileInfo, error) {
	return fs._fstat("fstat", name, flags)
}
|
||||||
|
|
||||||
|
// _fstat resolves name relative to the filesystem root via safePath, then
// stats it with the given flags. op is only used for error reporting so the
// exported wrappers (Stat, Lstat, ...) produce accurately-named errors.
func (fs *UnixFS) _fstat(op string, name string, flags int) (FileInfo, error) {
	dirfd, name, closeFd, err := fs.safePath(name)
	// closeFd is always non-nil (safePath defaults it to a no-op), so it is
	// safe to defer before checking err.
	defer closeFd()
	if err != nil {
		return nil, err
	}
	return fs._fstatat(op, dirfd, name, flags)
}
|
||||||
|
|
||||||
|
// fstatat stats name relative to an already-open directory descriptor with
// the given flags, reporting errors under the generic "fstatat" name.
func (fs *UnixFS) fstatat(dirfd int, name string, flags int) (FileInfo, error) {
	return fs._fstatat("fstatat", dirfd, name, flags)
}
|
||||||
|
|
||||||
|
// _fstatat performs the underlying fstatat(2) syscall against dirfd/name and
// converts the raw stat into a FileInfo. op is only used for error reporting.
func (fs *UnixFS) _fstatat(op string, dirfd int, name string, flags int) (FileInfo, error) {
	var s fileStat
	// Retry the syscall on EINTR; see https://go.dev/issue/11180.
	if err := ignoringEINTR(func() error {
		return unix.Fstatat(dirfd, name, &s.sys, flags)
	}); err != nil {
		return nil, ensurePathError(err, op, name)
	}
	fillFileStatFromSys(&s, name)
	return &s, nil
}
|
||||||
|
|
||||||
|
// Symlink creates newname as a symbolic link to oldname.
//
// On Windows, a symlink to a non-existent oldname creates a file symlink;
// if oldname is later created as a directory the symlink will not work.
//
// If there is an error, it will be of type *LinkError.
func (fs *UnixFS) Symlink(oldpath, newpath string) error {
	// Only newpath is sandbox-resolved: the link itself must live inside the
	// base path, but its target is stored verbatim.
	dirfd, newpath, closeFd, err := fs.safePath(newpath)
	defer closeFd()
	if err != nil {
		return err
	}
	if err := ignoringEINTR(func() error {
		// We aren't concerned with oldpath here as a symlink can point anywhere
		// it wants.
		return unix.Symlinkat(oldpath, dirfd, newpath)
	}); err != nil {
		return &LinkError{Op: "symlink", Old: oldpath, New: newpath, Err: err}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Touch will attempt to open a file for reading and/or writing. If the file
|
||||||
|
// does not exist it will be created, and any missing parent directories will
|
||||||
|
// also be created. The opened file may be truncated, only if `flag` has
|
||||||
|
// O_TRUNC set.
|
||||||
|
func (fs *UnixFS) Touch(path string, flag int, mode FileMode) (File, error) {
|
||||||
|
if flag&O_CREATE == 0 {
|
||||||
|
flag |= O_CREATE
|
||||||
|
}
|
||||||
|
dirfd, name, closeFd, err, _ := fs.TouchPath(path)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fs.OpenFileat(dirfd, name, flag, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TouchPath is like SafePath except that it will create any missing directories
// in the path. Unlike SafePath, TouchPath returns an additional boolean which
// indicates whether the parent directories already existed, this is intended to
// be used as a way to know if the final destination could already exist.
func (fs *UnixFS) TouchPath(path string) (int, string, func(), error, bool) {
	dirfd, name, closeFd, err := fs.safePath(path)
	switch {
	case err == nil:
		// Parents already exist; nothing to create.
		return dirfd, name, closeFd, nil, true
	case !errors.Is(err, ErrNotExist):
		// Any error other than "not exist" is fatal here.
		return dirfd, name, closeFd, err, false
	}

	// A *PathError carries the specific missing path so we know what to
	// create.
	var pathErr *PathError
	if !errors.As(err, &pathErr) {
		return dirfd, name, closeFd, err, false
	}
	if err := fs.MkdirAll(pathErr.Path, 0o755); err != nil {
		return dirfd, name, closeFd, err, false
	}

	// Close the previous file descriptor since we are going to be opening
	// a new one.
	closeFd()

	// Run safe path again now that the parent directories have been created.
	dirfd, name, closeFd, err = fs.safePath(path)
	return dirfd, name, closeFd, err, false
}
|
||||||
|
|
||||||
|
// WalkDir walks the file tree rooted at root, calling fn for each file or
// directory in the tree, including root.
//
// All errors that arise visiting files and directories are filtered by fn:
// see the [WalkDirFunc] documentation for details.
//
// The files are walked in lexical order, which makes the output deterministic
// but requires WalkDir to read an entire directory into memory before proceeding
// to walk that directory.
//
// WalkDir does not follow symbolic links found in directories,
// but if root itself is a symbolic link, its target will be walked.
func (fs *UnixFS) WalkDir(root string, fn WalkDirFunc) error {
	// Delegates to the package-level WalkDir with this filesystem as the
	// resolver.
	return WalkDir(fs, root, fn)
}
|
||||||
|
|
||||||
|
// openat is a wrapper around both unix.Openat and unix.Openat2. If the UnixFS
// was configured to enable openat2 support, unix.Openat2 will be used instead
// of unix.Openat due to having better security properties for our use-case.
func (fs *UnixFS) openat(dirfd int, name string, flag int, mode FileMode) (int, error) {
	// Never follow a symlink as the final path component; escapes are then
	// either rejected by RESOLVE_BENEATH (openat2) or by the /proc check
	// below (openat).
	if flag&O_NOFOLLOW == 0 {
		flag |= O_NOFOLLOW
	}

	var fd int
	for {
		var err error
		if fs.useOpenat2 {
			fd, err = fs._openat2(dirfd, name, uint64(flag), uint64(syscallMode(mode)))
		} else {
			fd, err = fs._openat(dirfd, name, flag, uint32(syscallMode(mode)))
		}
		if err == nil {
			break
		}
		// We have to check EINTR here, per issues https://go.dev/issue/11180 and https://go.dev/issue/39237.
		if err == unix.EINTR {
			continue
		}
		return 0, err
	}

	// If we are using openat2, we don't need the additional security checks.
	if fs.useOpenat2 {
		return fd, nil
	}

	// If we are not using openat2, do additional path checking. This assumes
	// that openat2 is using `RESOLVE_BENEATH` to avoid the same security
	// issue.
	//
	// /proc/self/fd/<fd> resolves to the real path of the open descriptor,
	// so evaluating its symlinks yields the fully-resolved final path.
	var finalPath string
	finalPath, err := filepath.EvalSymlinks(filepath.Join("/proc/self/fd/", strconv.Itoa(fd)))
	if err != nil {
		if !errors.Is(err, ErrNotExist) {
			return fd, fmt.Errorf("failed to evaluate symlink: %w", convertErrorType(err))
		}

		// The target of one of the symlinks (EvalSymlinks is recursive)
		// does not exist. So get the path that does not exist and use
		// that for further validation instead.
		var pErr *PathError
		if !errors.As(err, &pErr) {
			return fd, fmt.Errorf("failed to evaluate symlink: %w", convertErrorType(err))
		}

		// Update the final path to whatever directory or path didn't exist while
		// recursing any symlinks.
		finalPath = pErr.Path
		// Ensure the error is wrapped correctly; it is intentionally carried
		// through to the final return below.
		err = convertErrorType(err)
	}

	// Check if the path is within our root.
	if !fs.unsafeIsPathInsideOfBase(finalPath) {
		op := "openat"
		if fs.useOpenat2 {
			op = "openat2"
		}
		// NOTE(review): fd is returned alongside the error here; callers are
		// expected to handle/close it — confirm against call sites.
		return fd, &PathError{
			Op:   op,
			Path: name,
			Err:  ErrBadPathResolution,
		}
	}

	// Return the file descriptor and any potential error.
	return fd, err
}
|
||||||
|
|
||||||
|
// _openat is a wrapper around unix.Openat. This method should never be directly
// called, use `openat` instead.
func (fs *UnixFS) _openat(dirfd int, name string, flag int, mode uint32) (int, error) {
	// Ensure the O_CLOEXEC flag is set.
	// Go sets this in the os package, but since we are directly using unix
	// we need to set it ourselves.
	if flag&O_CLOEXEC == 0 {
		flag |= O_CLOEXEC
	}
	// O_LARGEFILE is set by Openat for us automatically.
	fd, err := unix.Openat(dirfd, name, flag, mode)
	switch {
	case err == nil:
		return fd, nil
	case err == unix.EINTR:
		// Returned raw so the retry loop in openat can detect it.
		return fd, err
	case err == unix.EAGAIN:
		return fd, err
	default:
		return fd, ensurePathError(err, "openat", name)
	}
}
|
||||||
|
|
||||||
|
// _openat2 is a wonderful syscall that supersedes the `openat` syscall. It has
// improved validation and security characteristics that weren't available or
// considered when `openat` was originally implemented. As such, it is only
// present in Kernel 5.6 and above.
//
// This method should never be directly called, use `openat` instead.
func (fs *UnixFS) _openat2(dirfd int, name string, flag, mode uint64) (int, error) {
	// Ensure the O_CLOEXEC flag is set.
	// Go sets this when using the os package, but since we are directly using
	// the unix package we need to set it ourselves.
	if flag&O_CLOEXEC == 0 {
		flag |= O_CLOEXEC
	}
	// Ensure the O_LARGEFILE flag is set.
	// Go sets this for unix.Open, unix.Openat, but not unix.Openat2.
	if flag&O_LARGEFILE == 0 {
		flag |= O_LARGEFILE
	}
	fd, err := unix.Openat2(dirfd, name, &unix.OpenHow{
		Flags: flag,
		Mode:  mode,
		// This is the bread and butter of preventing a symlink escape, without
		// this option, we have to handle path validation fully on our own.
		//
		// This is why using Openat2 over Openat is preferred if available.
		Resolve: unix.RESOLVE_BENEATH,
	})
	switch {
	case err == nil:
		return fd, nil
	case err == unix.EINTR:
		// Returned raw so the retry loop in openat can detect it.
		return fd, err
	case err == unix.EAGAIN:
		return fd, err
	default:
		return fd, ensurePathError(err, "openat2", name)
	}
}
|
||||||
|
|
||||||
|
// SafePath resolves path inside the filesystem's base directory and returns a
// directory file descriptor, the final path element relative to it, and a
// closeFd func that must be called to release the descriptor(s).
func (fs *UnixFS) SafePath(path string) (int, string, func(), error) {
	return fs.safePath(path)
}
|
||||||
|
|
||||||
|
// safePath resolves path to a directory file descriptor rooted at the
// filesystem's base path plus the final path element relative to that fd.
// closeFd is always non-nil and must be called by the caller (it is a no-op
// when nothing was opened).
func (fs *UnixFS) safePath(path string) (dirfd int, file string, closeFd func(), err error) {
	// Default closeFd to a NO-OP.
	closeFd = func() {}

	// Use unsafePath to clean the path and strip BasePath if path is absolute.
	var name string
	name, err = fs.unsafePath(path)
	if err != nil {
		return
	}

	// Open the base path. We use this as the sandbox root for any further
	// operations.
	var fsDirfd int
	fsDirfd, err = fs._openat(AT_EMPTY_PATH, fs.basePath, O_DIRECTORY|O_RDONLY, 0)
	if err != nil {
		return
	}

	// Split the parent from the last element in the path, this gives us the
	// "file name" and the full path to its parent.
	var dir string
	dir, file = filepath.Split(name)
	// If dir is empty then name is not nested.
	if dir == "" {
		dirfd = fsDirfd
		closeFd = func() { _ = unix.Close(dirfd) }

		// Return the base-path fd as dirfd, the file name, and a closeFd
		// that releases it.
		return
	}

	// Dir will usually contain a trailing slash as filepath.Split doesn't
	// trim slashes.
	dir = strings.TrimSuffix(dir, "/")
	dirfd, err = fs.openat(fsDirfd, dir, O_DIRECTORY|O_RDONLY, 0)
	if err != nil {
		// An error occurred while opening the directory, but we already opened
		// the filesystem root, so we still need to ensure it gets closed.
		closeFd = func() { _ = unix.Close(fsDirfd) }
	} else {
		// Set closeFd to close the newly opened directory file descriptor.
		closeFd = func() {
			_ = unix.Close(dirfd)
			_ = unix.Close(fsDirfd)
		}
	}

	// Return dirfd, name, the closeFd func, and err
	return
}
|
||||||
|
|
||||||
|
// unsafePath strips and joins the given path with the filesystem's base path,
// cleaning the result. The cleaned path is then checked if it starts with the
// filesystem's base path to catch any obvious path traversal escapes. The
// fully resolved path (if symlinks are followed) may not be within the
// filesystem's base path, additional checks are required to safely use paths
// returned by this function.
func (fs *UnixFS) unsafePath(path string) (string, error) {
	// Calling filepath.Clean on the path will resolve it to its absolute path,
	// removing any path traversal arguments (such as ..), leaving us with an
	// absolute path we can then use.
	//
	// This will also trim the filesystem's base path from the given path and
	// join the base path back on to ensure the path starts with the base path
	// without appending it twice.
	r := filepath.Clean(filepath.Join(fs.basePath, strings.TrimPrefix(path, fs.basePath)))

	if fs.unsafeIsPathInsideOfBase(r) {
		// This is kinda ironic isn't it.
		// We do this as we are operating with dirfds and `*at` syscalls which
		// behave differently if given an absolute path.
		//
		// First trim the BasePath, then trim any leading slashes.
		r = strings.TrimPrefix(strings.TrimPrefix(r, fs.basePath), "/")
		// If the path is empty then return "." as the path is pointing to the
		// root.
		if r == "" {
			return ".", nil
		}
		return r, nil
	}

	// Anything that escapes the base path is rejected outright.
	return "", &PathError{
		Op:   "safePath",
		Path: path,
		Err:  ErrBadPathResolution,
	}
}
|
||||||
|
|
||||||
|
// unsafeIsPathInsideOfBase checks if the given path is inside the filesystem's
|
||||||
|
// base path.
|
||||||
|
//
|
||||||
|
// NOTE: this method doesn't clean the given path or attempt to join the
|
||||||
|
// filesystem's base path. This is purely a basic prefix check against the
|
||||||
|
// given path.
|
||||||
|
func (fs *UnixFS) unsafeIsPathInsideOfBase(path string) bool {
|
||||||
|
return strings.HasPrefix(
|
||||||
|
strings.TrimSuffix(path, "/")+"/",
|
||||||
|
fs.basePath+"/",
|
||||||
|
)
|
||||||
|
}
|
||||||
768
internal/ufs/fs_unix_test.go
Normal file
768
internal/ufs/fs_unix_test.go
Normal file
@ -0,0 +1,768 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"slices"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// testUnixFS bundles a UnixFS with the temporary directories backing it so a
// test can tear everything down with Cleanup.
type testUnixFS struct {
	*ufs.UnixFS

	// TmpDir is the outer temporary directory; Root (the sandbox base path)
	// lives inside it, so paths in TmpDir but outside Root are "outside the
	// sandbox" for escape tests.
	TmpDir string
	Root   string
}
|
||||||
|
|
||||||
|
// Cleanup closes the filesystem and removes all temporary state. Errors are
// deliberately ignored: this is best-effort test teardown.
func (fs *testUnixFS) Cleanup() {
	_ = fs.Close()
	_ = os.RemoveAll(fs.TmpDir)
}
|
||||||
|
|
||||||
|
func newTestUnixFS() (*testUnixFS, error) {
|
||||||
|
tmpDir, err := os.MkdirTemp(os.TempDir(), "ufs")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
root := filepath.Join(tmpDir, "root")
|
||||||
|
if err := os.Mkdir(root, 0o755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// fmt.Println(tmpDir)
|
||||||
|
fs, err := ufs.NewUnixFS(root, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
tfs := &testUnixFS{
|
||||||
|
UnixFS: fs,
|
||||||
|
TmpDir: tmpDir,
|
||||||
|
Root: root,
|
||||||
|
}
|
||||||
|
return tfs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestUnixFS exercises the sandbox end-to-end: basic create/stat/mkdir
// operations inside the root, followed by symlink-escape attempts that must
// fail. Steps share on-disk state, so their order matters.
func TestUnixFS(t *testing.T) {
	t.Parallel()

	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// Resolving the root itself must succeed.
	_, _, closeFd, err := fs.SafePath("/")
	closeFd()
	if err != nil {
		t.Error(err)
		return
	}

	// Touch must create the missing parent directory as well as the file.
	f, err := fs.Touch("directory/file", ufs.O_RDWR, 0o644)
	if err != nil {
		t.Error(err)
		return
	}
	_ = f.Close()

	// Test creating a file within the root.
	f, err = fs.Create("test")
	if err != nil {
		t.Error(err)
		return
	}
	_ = f.Close()

	// Test stating a file within the root.
	if _, err := fs.Stat("test"); err != nil {
		t.Error(err)
		return
	}

	// Test creating a directory within the root.
	if err := fs.Mkdir("ima_directory", 0o755); err != nil {
		t.Error(err)
		return
	}

	// Test creating a nested directory within the root.
	if err := fs.Mkdir("ima_directory/ima_nother_directory", 0o755); err != nil {
		t.Error(err)
		return
	}

	// Test creating a file inside a directory within the root.
	f, err = fs.Create("ima_directory/ima_file")
	if err != nil {
		t.Error(err)
		return
	}
	_ = f.Close()

	// Test listing directory entries.
	if _, err := fs.ReadDir("ima_directory"); err != nil {
		t.Error(err)
		return
	}

	// Test symlink pointing outside the root: operations through it must be
	// rejected.
	if err := os.Symlink(fs.TmpDir, filepath.Join(fs.Root, "ima_bad_link")); err != nil {
		t.Error(err)
		return
	}
	f, err = fs.Create("ima_bad_link/ima_bad_file")
	if err == nil {
		_ = f.Close()
		t.Error("expected an error")
		return
	}
	if err := fs.Mkdir("ima_bad_link/ima_bad_directory", 0o755); err == nil {
		t.Error("expected an error")
		return
	}

	// Test symlink pointing outside the root inside a parent directory.
	if err := fs.Symlink(fs.TmpDir, filepath.Join(fs.Root, "ima_directory/ima_bad_link")); err != nil {
		t.Error(err)
		return
	}
	if err := fs.Mkdir("ima_directory/ima_bad_link/ima_bad_directory", 0o755); err == nil {
		t.Error("expected an error")
		return
	}

	// Test symlink pointing outside the root with a child directory: the
	// escape must still be detected even though the child exists.
	if err := os.Mkdir(filepath.Join(fs.TmpDir, "ima_directory"), 0o755); err != nil {
		t.Error(err)
		return
	}
	f, err = fs.Create("ima_bad_link/ima_directory/ima_bad_file")
	if err == nil {
		_ = f.Close()
		t.Error("expected an error")
		return
	}
	if err := fs.Mkdir("ima_bad_link/ima_directory/ima_bad_directory", 0o755); err == nil {
		t.Error("expected an error")
		return
	}

	if _, err := fs.ReadDir("ima_bad_link/ima_directory"); err == nil {
		t.Error("expected an error")
		return
	}

	// Create multiple nested directories.
	if err := fs.MkdirAll("ima_directory/ima_directory/ima_directory/ima_directory", 0o755); err != nil {
		t.Error(err)
		return
	}
	if _, err := fs.ReadDir("ima_directory/ima_directory"); err != nil {
		t.Error(err)
		return
	}

	// Test creating a directory under a symlink with a pre-existing directory.
	if err := fs.MkdirAll("ima_bad_link/ima_directory/ima_bad_directory/ima_bad_directory", 0o755); err == nil {
		t.Error("expected an error")
		return
	}

	// Test deletion
	if err := fs.Remove("test"); err != nil {
		t.Error(err)
		return
	}
	// Removing the symlink itself (not through it) is allowed.
	if err := fs.Remove("ima_bad_link"); err != nil {
		t.Error(err)
		return
	}

	// Test recursive deletion
	if err := fs.RemoveAll("ima_directory"); err != nil {
		t.Error(err)
		return
	}

	// Test recursive deletion underneath a bad symlink
	if err := fs.Mkdir("ima_directory", 0o755); err != nil {
		t.Error(err)
		return
	}
	if err := fs.Symlink(fs.TmpDir, filepath.Join(fs.Root, "ima_directory/ima_bad_link")); err != nil {
		t.Error(err)
		return
	}
	if err := fs.RemoveAll("ima_directory/ima_bad_link/ima_bad_file"); err == nil {
		t.Error("expected an error")
		return
	}

	// This should delete the symlink itself.
	if err := fs.RemoveAll("ima_directory/ima_bad_link"); err != nil {
		t.Error(err)
		return
	}
}
|
||||||
|
|
||||||
|
// TestUnixFS_Chmod is a placeholder for Chmod coverage.
func TestUnixFS_Chmod(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}

// TestUnixFS_Chown is a placeholder for Chown coverage.
func TestUnixFS_Chown(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}

// TestUnixFS_Lchown is a placeholder for Lchown coverage.
func TestUnixFS_Lchown(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}

// TestUnixFS_Chtimes is a placeholder for Chtimes coverage.
func TestUnixFS_Chtimes(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}
|
||||||
|
|
||||||
|
// TestUnixFS_Create is a placeholder for Create coverage.
func TestUnixFS_Create(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}

// TestUnixFS_Mkdir is a placeholder for Mkdir coverage.
func TestUnixFS_Mkdir(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}
|
||||||
|
|
||||||
|
// TestUnixFS_MkdirAll verifies nested directory creation inside the sandbox.
func TestUnixFS_MkdirAll(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// An absolute path is resolved relative to the sandbox root.
	if err := fs.MkdirAll("/a/bunch/of/directories", 0o755); err != nil {
		t.Error(err)
		return
	}

	// TODO: stat sanity check
}
|
||||||
|
|
||||||
|
// TestUnixFS_Open is a placeholder for Open coverage.
func TestUnixFS_Open(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}

// TestUnixFS_OpenFile is a placeholder for OpenFile coverage.
func TestUnixFS_OpenFile(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}

// TestUnixFS_ReadDir is a placeholder for ReadDir coverage.
func TestUnixFS_ReadDir(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}
|
||||||
|
|
||||||
|
// TestUnixFS_Remove verifies that Remove rejects attempts to delete the base
// directory itself or anything reached through path traversal.
func TestUnixFS_Remove(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	t.Run("base directory", func(t *testing.T) {
		// Try to remove the base directory.
		if err := fs.Remove(""); !errors.Is(err, ufs.ErrBadPathResolution) {
			t.Errorf("expected an a bad path resolution error, but got: %v", err)
			return
		}
	})

	t.Run("path traversal", func(t *testing.T) {
		// Try to escape the base directory via "..".
		// NOTE(review): this subtest calls RemoveAll even though this is the
		// Remove test — looks like a copy-paste from TestUnixFS_RemoveAll;
		// confirm whether fs.Remove was intended here.
		if err := fs.RemoveAll("../root"); !errors.Is(err, ufs.ErrBadPathResolution) {
			t.Errorf("expected an a bad path resolution error, but got: %v", err)
			return
		}
	})
}
|
||||||
|
|
||||||
|
// TestUnixFS_RemoveAll verifies that RemoveAll rejects attempts to delete the
// base directory itself or anything reached through path traversal.
func TestUnixFS_RemoveAll(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	t.Run("base directory", func(t *testing.T) {
		// Try to remove the base directory.
		if err := fs.RemoveAll(""); !errors.Is(err, ufs.ErrBadPathResolution) {
			t.Errorf("expected an a bad path resolution error, but got: %v", err)
			return
		}
	})

	t.Run("path traversal", func(t *testing.T) {
		// Try to escape the base directory via "..".
		if err := fs.RemoveAll("../root"); !errors.Is(err, ufs.ErrBadPathResolution) {
			t.Errorf("expected an a bad path resolution error, but got: %v", err)
			return
		}
	})
}
|
||||||
|
|
||||||
|
// TestUnixFS_Rename verifies Rename guards around the base directory and the
// happy paths for renaming files and directories.
func TestUnixFS_Rename(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	t.Run("rename base directory", func(t *testing.T) {
		// Try to rename the base directory.
		if err := fs.Rename("", "yeet"); !errors.Is(err, ufs.ErrBadPathResolution) {
			t.Errorf("expected an a bad path resolution error, but got: %v", err)
			return
		}
	})

	t.Run("rename over base directory", func(t *testing.T) {
		// Create a directory that we are going to try and move over top of the
		// existing base directory.
		if err := fs.Mkdir("overwrite_dir", 0o755); err != nil {
			t.Error(err)
			return
		}

		// Try to rename over the base directory.
		if err := fs.Rename("overwrite_dir", ""); !errors.Is(err, ufs.ErrBadPathResolution) {
			t.Errorf("expected an a bad path resolution error, but got: %v", err)
			return
		}
	})

	t.Run("directory rename", func(t *testing.T) {
		// Create a directory to rename to something else.
		if err := fs.Mkdir("test_directory", 0o755); err != nil {
			t.Error(err)
			return
		}

		// Try to rename "test_directory" to "directory".
		if err := fs.Rename("test_directory", "directory"); err != nil {
			t.Errorf("expected no error, but got: %v", err)
			return
		}

		// Sanity check against the real filesystem, bypassing the sandbox.
		if _, err := os.Lstat(filepath.Join(fs.Root, "directory")); err != nil {
			t.Errorf("Lstat errored when performing sanity check: %v", err)
			return
		}
	})

	t.Run("file rename", func(t *testing.T) {
		// Create a file to rename to something else.
		f, err := fs.Create("test_file")
		if err != nil {
			t.Error(err)
			return
		}
		_ = f.Close()

		// Try to rename "test_file" to "file".
		if err := fs.Rename("test_file", "file"); err != nil {
			t.Errorf("expected no error, but got: %v", err)
			return
		}

		// Sanity check against the real filesystem, bypassing the sandbox.
		if _, err := os.Lstat(filepath.Join(fs.Root, "file")); err != nil {
			t.Errorf("Lstat errored when performing sanity check: %v", err)
			return
		}
	})
}
|
||||||
|
|
||||||
|
// TestUnixFS_Stat is a placeholder for Stat coverage.
func TestUnixFS_Stat(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}

// TestUnixFS_Lstat is a placeholder for Lstat coverage.
func TestUnixFS_Lstat(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}

// TestUnixFS_Symlink is a placeholder for Symlink coverage.
func TestUnixFS_Symlink(t *testing.T) {
	t.Parallel()
	fs, err := newTestUnixFS()
	if err != nil {
		t.Fatal(err)
		return
	}
	defer fs.Cleanup()

	// TODO: implement
}
|
||||||
|
|
||||||
|
func TestUnixFS_Touch(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
fs, err := newTestUnixFS()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fs.Cleanup()
|
||||||
|
|
||||||
|
t.Run("base directory", func(t *testing.T) {
|
||||||
|
path := "i_touched_a_file"
|
||||||
|
f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("existing parent directory", func(t *testing.T) {
|
||||||
|
dir := "some_parent_directory"
|
||||||
|
if err := fs.Mkdir(dir, 0o755); err != nil {
|
||||||
|
t.Errorf("error creating parent directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
path := filepath.Join(dir, "i_touched_a_file")
|
||||||
|
f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("error touching file: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("non-existent parent directory", func(t *testing.T) {
|
||||||
|
path := "some_other_directory/i_touched_a_file"
|
||||||
|
f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("error touching file: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("non-existent parent directories", func(t *testing.T) {
|
||||||
|
path := "some_other_directory/some_directory/i_touched_a_file"
|
||||||
|
f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("error touching file: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnixFS_WalkDir(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
fs, err := newTestUnixFS()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fs.Cleanup()
|
||||||
|
|
||||||
|
//for i := 0; i < 5; i++ {
|
||||||
|
// dirName := "dir" + strconv.Itoa(i)
|
||||||
|
// if err := fs.Mkdir(dirName, 0o755); err != nil {
|
||||||
|
// t.Error(err)
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
// for j := 0; j < 5; j++ {
|
||||||
|
// f, err := fs.Create(filepath.Join(dirName, "file"+strconv.Itoa(j)))
|
||||||
|
// if err != nil {
|
||||||
|
// t.Error(err)
|
||||||
|
// return
|
||||||
|
// }
|
||||||
|
// _ = f.Close()
|
||||||
|
// }
|
||||||
|
//}
|
||||||
|
//
|
||||||
|
//if err := fs.WalkDir(".", func(path string, info ufs.DirEntry, err error) error {
|
||||||
|
// if err != nil {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
// t.Log(path)
|
||||||
|
// return nil
|
||||||
|
//}); err != nil {
|
||||||
|
// t.Error(err)
|
||||||
|
// return
|
||||||
|
//}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnixFS_WalkDirat(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
fs, err := newTestUnixFS()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fs.Cleanup()
|
||||||
|
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
dirName := "base" + strconv.Itoa(i)
|
||||||
|
if err := fs.Mkdir(dirName, 0o755); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for j := 0; j < 1; j++ {
|
||||||
|
f, err := fs.Create(filepath.Join(dirName, "file"+strconv.Itoa(j)))
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
if err := fs.Mkdir(filepath.Join(dirName, "dir"+strconv.Itoa(j)), 0o755); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f, err = fs.Create(filepath.Join(dirName, "dir"+strconv.Itoa(j), "file"+strconv.Itoa(j)))
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("walk starting at the filesystem root", func(t *testing.T) {
|
||||||
|
pathsTraversed, err := fs.testWalkDirAt("")
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
expect := []Path{
|
||||||
|
{Name: ".", Relative: "."},
|
||||||
|
{Name: "base0", Relative: "base0"},
|
||||||
|
{Name: "dir0", Relative: "base0/dir0"},
|
||||||
|
{Name: "file0", Relative: "base0/dir0/file0"},
|
||||||
|
{Name: "file0", Relative: "base0/file0"},
|
||||||
|
{Name: "base1", Relative: "base1"},
|
||||||
|
{Name: "dir0", Relative: "base1/dir0"},
|
||||||
|
{Name: "file0", Relative: "base1/dir0/file0"},
|
||||||
|
{Name: "file0", Relative: "base1/file0"},
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(pathsTraversed, expect) {
|
||||||
|
t.Log(pathsTraversed)
|
||||||
|
t.Log(expect)
|
||||||
|
t.Error("walk doesn't match")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("walk starting in a directory", func(t *testing.T) {
|
||||||
|
pathsTraversed, err := fs.testWalkDirAt("base0")
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
expect := []Path{
|
||||||
|
// TODO: what should relative actually be here?
|
||||||
|
// The behaviour differs from walking the directory root vs a sub
|
||||||
|
// directory. When walking from the root, dirfd is the directory we
|
||||||
|
// are walking from and both name and relative are `.`. However,
|
||||||
|
// when walking from a subdirectory, fd is the parent of the
|
||||||
|
// subdirectory, and name is the subdirectory.
|
||||||
|
{Name: "base0", Relative: "."},
|
||||||
|
{Name: "dir0", Relative: "dir0"},
|
||||||
|
{Name: "file0", Relative: "dir0/file0"},
|
||||||
|
{Name: "file0", Relative: "file0"},
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(pathsTraversed, expect) {
|
||||||
|
t.Log(pathsTraversed)
|
||||||
|
t.Log(expect)
|
||||||
|
t.Error("walk doesn't match")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Path is one entry observed during a directory walk: the entry's base name
// paired with its path relative to the walk's starting directory.
type Path struct {
	// Name is the base name reported by the walk callback.
	Name string
	// Relative is the path relative to the walk root.
	Relative string
}
|
||||||
|
|
||||||
|
func (fs *testUnixFS) testWalkDirAt(path string) ([]Path, error) {
|
||||||
|
dirfd, name, closeFd, err := fs.SafePath(path)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var pathsTraversed []Path
|
||||||
|
if err := fs.WalkDirat(dirfd, name, func(_ int, name, relative string, _ ufs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
pathsTraversed = append(pathsTraversed, Path{Name: name, Relative: relative})
|
||||||
|
return nil
|
||||||
|
}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
slices.SortStableFunc(pathsTraversed, func(a, b Path) int {
|
||||||
|
if a.Relative > b.Relative {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if a.Relative < b.Relative {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
})
|
||||||
|
return pathsTraversed, nil
|
||||||
|
}
|
||||||
27
internal/ufs/go.LICENSE
Normal file
27
internal/ufs/go.LICENSE
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
63
internal/ufs/mkdir_unix.go
Normal file
63
internal/ufs/mkdir_unix.go
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was derived from `go/src/os/path.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
// mkdirAll is a recursive Mkdir implementation that properly handles symlinks.
|
||||||
|
// mkdirAll is a recursive Mkdir implementation that properly handles symlinks.
//
// It mirrors os.MkdirAll (see go/src/os/path.go): if name already exists as a
// directory (or a symlink resolving to one) it succeeds; otherwise missing
// parents are created recursively before the final Mkdir.
func (fs *UnixFS) mkdirAll(name string, mode FileMode) error {
	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	dir, err := fs.Lstat(name)
	if err == nil {
		if dir.Mode()&ModeSymlink != 0 {
			// If the final path is a symlink, resolve its target and use that
			// to check instead.
			dir, err = fs.Stat(name)
			if err != nil {
				return err
			}
		}
		if dir.IsDir() {
			return nil
		}
		// Something non-directory already occupies this name.
		return &PathError{Op: "mkdir", Path: name, Err: ErrNotDirectory}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	i := len(name)
	for i > 0 && name[i-1] == '/' { // Skip trailing path separator.
		i--
	}

	j := i
	for j > 0 && name[j-1] != '/' { // Scan backward over element.
		j--
	}

	if j > 1 {
		// Create parent.
		err = fs.mkdirAll(name[:j-1], mode)
		if err != nil {
			return err
		}
	}

	// Parent now exists; invoke Mkdir and use its result.
	err = fs.Mkdir(name, mode)
	if err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := fs.Lstat(name)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}
	return nil
}
|
||||||
80
internal/ufs/path_unix.go
Normal file
80
internal/ufs/path_unix.go
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was copied from `go/src/os/path.go`
|
||||||
|
// and `go/src/os/path_unix.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// basename removes trailing slashes and the leading directory name from path name.
|
||||||
|
// basename strips trailing slashes from name and then drops everything up to
// and including the final slash, returning only the last path element.
func basename(name string) string {
	// Drop trailing separators, keeping the slice and index in lockstep.
	end := len(name) - 1
	for end > 0 && name[end] == '/' {
		name = name[:end]
		end--
	}
	// Scan backwards for the last separator and keep what follows it.
	for pos := end - 1; pos >= 0; pos-- {
		if name[pos] == '/' {
			return name[pos+1:]
		}
	}
	return name
}
|
||||||
|
|
||||||
|
// endsWithDot reports whether the final component of path is ".".
|
||||||
|
// endsWithDot reports whether the final component of path is ".".
func endsWithDot(path string) bool {
	switch {
	case path == ".":
		return true
	case len(path) >= 2 && path[len(path)-1] == '.' && os.IsPathSeparator(path[len(path)-2]):
		// A trailing "/." also counts as a final "." component.
		return true
	default:
		return false
	}
}
|
||||||
|
|
||||||
|
// splitPath returns the base name and parent directory.
|
||||||
|
// splitPath splits path into its parent directory and base name, after
// collapsing redundant leading slashes and trimming trailing ones. When no
// separator remains, the parent is ".".
func splitPath(path string) (string, string) {
	// Collapse any run of leading slashes down to a single one.
	for len(path) > 1 && path[0] == '/' && path[1] == '/' {
		path = path[1:]
	}

	// Trim trailing separators, tracking the index of the last byte kept.
	end := len(path) - 1
	for end > 0 && path[end] == '/' {
		path = path[:end]
		end--
	}

	// Defaults: no separator found means the path is relative to "here".
	dir, base := ".", path

	// Find the last separator; everything before it is the parent.
	for pos := end - 1; pos >= 0; pos-- {
		if path[pos] != '/' {
			continue
		}
		if pos == 0 {
			dir = path[:1] // parent is the root "/"
		} else {
			dir = path[:pos]
		}
		base = path[pos+1:]
		break
	}

	return dir, base
}
|
||||||
115
internal/ufs/quota_writer.go
Normal file
115
internal/ufs/quota_writer.go
Normal file
@ -0,0 +1,115 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CountedWriter is a writer that counts the amount of data written to the
// underlying writer.
//
// The byte counter is atomic, but the saved error is not synchronized, so a
// single CountedWriter should be driven by one goroutine at a time.
type CountedWriter struct {
	File

	// counter accumulates the number of bytes the underlying writer reported
	// as written.
	counter atomic.Int64
	// err holds the most recent error returned by the underlying writer.
	err error
}

// NewCountedWriter returns a new countedWriter that counts the amount of bytes
// written to the underlying writer.
func NewCountedWriter(f File) *CountedWriter {
	return &CountedWriter{File: f}
}

// BytesWritten returns the amount of bytes that have been written to the
// underlying writer.
func (w *CountedWriter) BytesWritten() int64 {
	return w.counter.Load()
}

// Error returns the error from the writer if any. If the error is an EOF, nil
// will be returned.
func (w *CountedWriter) Error() error {
	if w.err == io.EOF {
		return nil
	}
	return w.err
}

// Write writes bytes to the underlying writer while tracking the total amount
// of bytes written.
//
// Note the deferred error semantics: a non-EOF error from the underlying
// writer is stored but NOT returned from the call that produced it; the next
// call short-circuits with io.EOF and the real error is available via Error.
func (w *CountedWriter) Write(p []byte) (int, error) {
	if w.err != nil {
		return 0, io.EOF
	}

	// Write is a very simple operation for us to handle.
	n, err := w.File.Write(p)
	w.counter.Add(int64(n))
	w.err = err

	// TODO: is this how we actually want to handle errors with this?
	if err == io.EOF {
		return n, io.EOF
	}
	return n, nil
}

// ReadFrom copies from r into the underlying file, adding the byte count the
// file's ReadFrom reports to this writer's counter.
func (w *CountedWriter) ReadFrom(r io.Reader) (n int64, err error) {
	cr := NewCountedReader(r)
	n, err = w.File.ReadFrom(cr)
	w.counter.Add(n)
	return
}
|
||||||
|
|
||||||
|
// CountedReader is a reader that counts the amount of data read from the
|
||||||
|
// underlying reader.
|
||||||
|
type CountedReader struct {
|
||||||
|
reader io.Reader
|
||||||
|
|
||||||
|
counter atomic.Int64
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ io.Reader = (*CountedReader)(nil)
|
||||||
|
|
||||||
|
// NewCountedReader returns a new countedReader that counts the amount of bytes
|
||||||
|
// read from the underlying reader.
|
||||||
|
func NewCountedReader(r io.Reader) *CountedReader {
|
||||||
|
return &CountedReader{reader: r}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesRead returns the amount of bytes that have been read from the underlying
|
||||||
|
// reader.
|
||||||
|
func (r *CountedReader) BytesRead() int64 {
|
||||||
|
return r.counter.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the error from the reader if any. If the error is an EOF, nil
|
||||||
|
// will be returned.
|
||||||
|
func (r *CountedReader) Error() error {
|
||||||
|
if r.err == io.EOF {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads bytes from the underlying reader while tracking the total amount
|
||||||
|
// of bytes read.
|
||||||
|
func (r *CountedReader) Read(p []byte) (int, error) {
|
||||||
|
if r.err != nil {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := r.reader.Read(p)
|
||||||
|
r.counter.Add(int64(n))
|
||||||
|
r.err = err
|
||||||
|
|
||||||
|
// TODO: is this how we actually want to handle errors with this?
|
||||||
|
if err == io.EOF {
|
||||||
|
return n, io.EOF
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
261
internal/ufs/removeall_unix.go
Normal file
261
internal/ufs/removeall_unix.go
Normal file
@ -0,0 +1,261 @@
|
|||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was derived from `go/src/os/removeall_at.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// unixFS is the narrow filesystem surface required by the removal helpers in
// this file, satisfied by *UnixFS.
type unixFS interface {
	// Open opens the named file for reading.
	Open(name string) (File, error)
	// Remove removes the named file or (empty) directory.
	Remove(name string) error
	// unlinkat removes path relative to the open directory dirfd, forwarding
	// flags (e.g. AT_REMOVEDIR) to the underlying syscall.
	unlinkat(dirfd int, path string, flags int) error
}
|
||||||
|
|
||||||
|
// removeAll removes path and any children it contains, delegating to the
// package-level removeAll with this filesystem as the backend.
func (fs *UnixFS) removeAll(path string) error {
	return removeAll(fs, path)
}
|
||||||
|
|
||||||
|
// removeAll removes path and, if it is a directory, everything beneath it,
// mirroring os.RemoveAll semantics: a missing path is not an error.
func removeAll(fs unixFS, path string) error {
	if path == "" {
		// fail silently to retain compatibility with previous behavior
		// of RemoveAll. See issue https://go.dev/issue/28830.
		return nil
	}

	// The rmdir system call does not permit removing ".",
	// so we don't permit it either.
	if endsWithDot(path) {
		return &PathError{Op: "removeall", Path: path, Err: unix.EINVAL}
	}

	// Simple case: if Remove works, we're done.
	err := fs.Remove(path)
	if err == nil || errors.Is(err, ErrNotExist) {
		return nil
	}

	// RemoveAll recurses by deleting the path base from
	// its parent directory
	parentDir, base := splitPath(path)

	parent, err := fs.Open(parentDir)
	if err != nil {
		if !errors.Is(err, ErrNotExist) {
			return err
		}
		// If parent does not exist, base cannot exist. Fail silently
		return nil
	}
	defer parent.Close()

	if err := removeAllFrom(fs, parent, base); err != nil {
		// Re-prefix the parent directory onto any path error so callers see
		// the full offending path rather than just the base name.
		if pathErr, ok := err.(*PathError); ok {
			pathErr.Path = parentDir + string(os.PathSeparator) + pathErr.Path
			err = convertErrorType(pathErr)
		} else {
			err = ensurePathError(err, "removeallfrom", base)
		}
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// removeContents empties the directory at path without removing the directory
// itself, delegating to the package-level removeContents.
func (fs *UnixFS) removeContents(path string) error {
	return removeContents(fs, path)
}
|
||||||
|
|
||||||
|
// removeContents removes everything inside the directory at path while
// leaving the directory itself in place. A missing path is treated as
// success.
func removeContents(fs unixFS, path string) error {
	if path == "" {
		// fail silently to retain compatibility with previous behavior
		// of RemoveAll. See issue https://go.dev/issue/28830.
		return nil
	}

	// RemoveAll recurses by deleting the path base from
	// its parent directory
	parentDir, base := splitPath(path)

	parent, err := fs.Open(parentDir)
	if err != nil {
		if !errors.Is(err, ErrNotExist) {
			return err
		}
		// If parent does not exist, base cannot exist. Fail silently
		return nil
	}
	defer parent.Close()

	if err := removeContentsFrom(fs, parent, base); err != nil {
		// Re-prefix the parent directory onto any path error so callers see
		// the full offending path rather than just the base name.
		if pathErr, ok := err.(*PathError); ok {
			pathErr.Path = parentDir + string(os.PathSeparator) + pathErr.Path
			err = convertErrorType(pathErr)
		} else {
			err = ensurePathError(err, "removecontentsfrom", base)
		}
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// removeContentsFrom recursively removes all descendants of parent without
|
||||||
|
// removing parent itself. Parent must be a directory.
|
||||||
|
func removeContentsFrom(fs unixFS, parent File, base string) error {
|
||||||
|
parentFd := int(parent.Fd())
|
||||||
|
|
||||||
|
var recurseErr error
|
||||||
|
for {
|
||||||
|
const reqSize = 1024
|
||||||
|
var respSize int
|
||||||
|
|
||||||
|
// Open the directory to recurse into
|
||||||
|
file, err := openFdAt(parentFd, base)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
recurseErr = &PathError{Op: "openfdat", Path: base, Err: err}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
numErr := 0
|
||||||
|
|
||||||
|
names, readErr := file.Readdirnames(reqSize)
|
||||||
|
// Errors other than EOF should stop us from continuing.
|
||||||
|
if readErr != nil && readErr != io.EOF {
|
||||||
|
_ = file.Close()
|
||||||
|
if errors.Is(readErr, ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &PathError{Op: "readdirnames", Path: base, Err: readErr}
|
||||||
|
}
|
||||||
|
|
||||||
|
respSize = len(names)
|
||||||
|
for _, name := range names {
|
||||||
|
err := removeAllFrom(fs, file, name)
|
||||||
|
if err != nil {
|
||||||
|
if pathErr, ok := err.(*PathError); ok {
|
||||||
|
pathErr.Path = base + string(os.PathSeparator) + pathErr.Path
|
||||||
|
}
|
||||||
|
numErr++
|
||||||
|
if recurseErr == nil {
|
||||||
|
recurseErr = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we can delete any entry, break to start new iteration.
|
||||||
|
// Otherwise, we discard current names, get next entries and try deleting them.
|
||||||
|
if numErr != reqSize {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Removing files from the directory may have caused
|
||||||
|
// the OS to reshuffle it. Simply calling Readdirnames
|
||||||
|
// again may skip some entries. The only reliable way
|
||||||
|
// to avoid this is to close and re-open the
|
||||||
|
// directory. See issue https://go.dev/issue/20841.
|
||||||
|
_ = file.Close()
|
||||||
|
|
||||||
|
// Finish when the end of the directory is reached
|
||||||
|
if respSize < reqSize {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeAllFrom removes the entry named base (relative to the open directory
// parent) and, when it is a directory, everything it contains first.
func removeAllFrom(fs unixFS, parent File, base string) error {
	parentFd := int(parent.Fd())

	// Simple case: if Unlink (aka remove) works, we're done.
	err := fs.unlinkat(parentFd, base, 0)
	if err == nil || errors.Is(err, ErrNotExist) {
		return nil
	}

	// EISDIR means that we have a directory, and we need to
	// remove its contents.
	// EPERM or EACCES means that we don't have write permission on
	// the parent directory, but this entry might still be a directory
	// whose contents need to be removed.
	// Otherwise, just return the error.
	if err != unix.EISDIR && err != unix.EPERM && err != unix.EACCES {
		return &PathError{Op: "unlinkat", Path: base, Err: err}
	}

	// Is this a directory we need to recurse into?
	var statInfo unix.Stat_t
	statErr := ignoringEINTR(func() error {
		return unix.Fstatat(parentFd, base, &statInfo, AT_SYMLINK_NOFOLLOW)
	})
	if statErr != nil {
		if errors.Is(statErr, ErrNotExist) {
			return nil
		}
		return &PathError{Op: "fstatat", Path: base, Err: statErr}
	}
	if statInfo.Mode&unix.S_IFMT != unix.S_IFDIR {
		// Not a directory; return the error from the unix.Unlinkat.
		return &PathError{Op: "unlinkat", Path: base, Err: err}
	}

	// Remove all contents will remove the contents of the directory.
	//
	// It was split out of this function to allow the deletion of the
	// contents of a directory, without deleting the directory itself.
	recurseErr := removeContentsFrom(fs, parent, base)

	// Remove the directory itself.
	unlinkErr := fs.unlinkat(parentFd, base, AT_REMOVEDIR)
	if unlinkErr == nil || errors.Is(unlinkErr, ErrNotExist) {
		return nil
	}

	// Prefer the error from recursing into the directory; it is usually more
	// specific than failing to remove a (likely non-empty) directory.
	if recurseErr != nil {
		return recurseErr
	}

	// NOTE(review): this wraps the *first* unlinkat error (err), discarding
	// unlinkErr from the AT_REMOVEDIR attempt — confirm that is intended.
	return ensurePathError(err, "unlinkat", base)
}
|
||||||
|
|
||||||
|
// openFdAt opens path relative to the directory in fd.
// Other than that this should act like openFileNolog.
// This acts like openFileNolog rather than OpenFile because
// we are going to (try to) remove the file.
// The contents of this file are not relevant for test caching.
func openFdAt(dirfd int, name string) (File, error) {
	var fd int
	for {
		var err error
		// O_NOFOLLOW prevents being redirected through a symlink while
		// deleting; O_CLOEXEC keeps the fd out of child processes.
		fd, err = unix.Openat(dirfd, name, O_RDONLY|O_CLOEXEC|O_NOFOLLOW, 0)
		if err == nil {
			break
		}

		// See comment in openFileNolog.
		if err == unix.EINTR {
			continue
		}

		return nil, err
	}
	// This is stupid, os.NewFile immediately casts `fd` to an `int`, but wants
	// it to be passed as a `uintptr`.
	return os.NewFile(uintptr(fd), name), nil
}
|
||||||
67
internal/ufs/stat_unix.go
Normal file
67
internal/ufs/stat_unix.go
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was copied from `go/src/os/stat_linux.go`
|
||||||
|
// and `go/src/os/types_unix.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fileStat is the FileInfo implementation backing stat results; it caches the
// fields derived from the raw unix.Stat_t in sys.
type fileStat struct {
	name    string
	size    int64
	mode    FileMode
	modTime time.Time
	sys     unix.Stat_t
}

// Compile-time assertion that fileStat satisfies FileInfo.
var _ FileInfo = (*fileStat)(nil)

func (fs *fileStat) Size() int64        { return fs.size }
func (fs *fileStat) Mode() FileMode     { return fs.mode }
func (fs *fileStat) ModTime() time.Time { return fs.modTime }
func (fs *fileStat) Sys() any           { return &fs.sys }
func (fs *fileStat) Name() string       { return fs.name }
func (fs *fileStat) IsDir() bool        { return fs.Mode().IsDir() }

// fillFileStatFromSys populates the derived fields of fs from fs.sys, which
// must already hold the result of a stat-family syscall for name.
func fillFileStatFromSys(fs *fileStat, name string) {
	fs.name = basename(name)
	fs.size = fs.sys.Size
	fs.modTime = time.Unix(fs.sys.Mtim.Unix())
	// Start with the permission bits, then translate the S_IF* type bits
	// into FileMode type flags.
	fs.mode = FileMode(fs.sys.Mode & 0o777)
	switch fs.sys.Mode & unix.S_IFMT {
	case unix.S_IFBLK:
		fs.mode |= ModeDevice
	case unix.S_IFCHR:
		fs.mode |= ModeDevice | ModeCharDevice
	case unix.S_IFDIR:
		fs.mode |= ModeDir
	case unix.S_IFIFO:
		fs.mode |= ModeNamedPipe
	case unix.S_IFLNK:
		fs.mode |= ModeSymlink
	case unix.S_IFREG:
		// nothing to do
	case unix.S_IFSOCK:
		fs.mode |= ModeSocket
	}
	// Carry over the setuid/setgid/sticky bits.
	if fs.sys.Mode&unix.S_ISGID != 0 {
		fs.mode |= ModeSetgid
	}
	if fs.sys.Mode&unix.S_ISUID != 0 {
		fs.mode |= ModeSetuid
	}
	if fs.sys.Mode&unix.S_ISVTX != 0 {
		fs.mode |= ModeSticky
	}
}
|
||||||
124
internal/ufs/walk.go
Normal file
124
internal/ufs/walk.go
Normal file
@ -0,0 +1,124 @@
|
|||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was derived from `go/src/io/fs/walk.go`.
|
||||||
|
|
||||||
|
// Copyright 2020 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
iofs "io/fs"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SkipDir is used as a return value from [WalkDirFunc] to indicate that
|
||||||
|
// the directory named in the call is to be skipped. It is not returned
|
||||||
|
// as an error by any function.
|
||||||
|
var SkipDir = iofs.SkipDir
|
||||||
|
|
||||||
|
// SkipAll is used as a return value from [WalkDirFunc] to indicate that
|
||||||
|
// all remaining files and directories are to be skipped. It is not returned
|
||||||
|
// as an error by any function.
|
||||||
|
var SkipAll = iofs.SkipAll
|
||||||
|
|
||||||
|
// WalkDirFunc is the type of the function called by [WalkDir] to visit
|
||||||
|
// each file or directory.
|
||||||
|
//
|
||||||
|
// The path argument contains the argument to [WalkDir] as a prefix.
|
||||||
|
// That is, if WalkDir is called with root argument "dir" and finds a file
|
||||||
|
// named "a" in that directory, the walk function will be called with
|
||||||
|
// argument "dir/a".
|
||||||
|
//
|
||||||
|
// The d argument is the [DirEntry] for the named path.
|
||||||
|
//
|
||||||
|
// The error result returned by the function controls how [WalkDir]
|
||||||
|
// continues. If the function returns the special value [SkipDir], WalkDir
|
||||||
|
// skips the current directory (path if d.IsDir() is true, otherwise
|
||||||
|
// path's parent directory). If the function returns the special value
|
||||||
|
// [SkipAll], WalkDir skips all remaining files and directories. Otherwise,
|
||||||
|
// if the function returns a non-nil error, WalkDir stops entirely and
|
||||||
|
// returns that error.
|
||||||
|
//
|
||||||
|
// The err argument reports an error related to path, signaling that
|
||||||
|
// [WalkDir] will not walk into that directory. The function can decide how
|
||||||
|
// to handle that error; as described earlier, returning the error will
|
||||||
|
// cause WalkDir to stop walking the entire tree.
|
||||||
|
//
|
||||||
|
// [WalkDir] calls the function with a non-nil err argument in two cases.
|
||||||
|
//
|
||||||
|
// First, if the initial [Stat] on the root directory fails, WalkDir
|
||||||
|
// calls the function with path set to root, d set to nil, and err set to
|
||||||
|
// the error from [fs.Stat].
|
||||||
|
//
|
||||||
|
// Second, if a directory's ReadDir method (see [ReadDirFile]) fails, WalkDir calls the
|
||||||
|
// function with path set to the directory's path, d set to an
|
||||||
|
// [DirEntry] describing the directory, and err set to the error from
|
||||||
|
// ReadDir. In this second case, the function is called twice with the
|
||||||
|
// path of the directory: the first call is before the directory read is
|
||||||
|
// attempted and has err set to nil, giving the function a chance to
|
||||||
|
// return [SkipDir] or [SkipAll] and avoid the ReadDir entirely. The second call
|
||||||
|
// is after a failed ReadDir and reports the error from ReadDir.
|
||||||
|
// (If ReadDir succeeds, there is no second call.)
|
||||||
|
type WalkDirFunc func(path string, d DirEntry, err error) error
|
||||||
|
|
||||||
|
// WalkDir walks the file tree rooted at root, calling fn for each file or
|
||||||
|
// directory in the tree, including root.
|
||||||
|
//
|
||||||
|
// All errors that arise visiting files and directories are filtered by fn:
|
||||||
|
// see the [WalkDirFunc] documentation for details.
|
||||||
|
//
|
||||||
|
// The files are walked in lexical order, which makes the output deterministic
|
||||||
|
// but requires WalkDir to read an entire directory into memory before proceeding
|
||||||
|
// to walk that directory.
|
||||||
|
//
|
||||||
|
// WalkDir does not follow symbolic links found in directories,
|
||||||
|
// but if root itself is a symbolic link, its target will be walked.
|
||||||
|
func WalkDir(fs Filesystem, root string, fn WalkDirFunc) error {
|
||||||
|
info, err := fs.Stat(root)
|
||||||
|
if err != nil {
|
||||||
|
err = fn(root, nil, err)
|
||||||
|
} else {
|
||||||
|
err = walkDir(fs, root, iofs.FileInfoToDirEntry(info), fn)
|
||||||
|
}
|
||||||
|
if err == SkipDir || err == SkipAll {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// walkDir recursively descends path, calling walkDirFn.
|
||||||
|
func walkDir(fs Filesystem, name string, d DirEntry, walkDirFn WalkDirFunc) error {
|
||||||
|
if err := walkDirFn(name, d, nil); err != nil || !d.IsDir() {
|
||||||
|
if err == SkipDir && d.IsDir() {
|
||||||
|
// Successfully skipped directory.
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dirs, err := fs.ReadDir(name)
|
||||||
|
if err != nil {
|
||||||
|
// Second call, to report ReadDir error.
|
||||||
|
err = walkDirFn(name, d, err)
|
||||||
|
if err != nil {
|
||||||
|
if err == SkipDir && d.IsDir() {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d1 := range dirs {
|
||||||
|
name1 := path.Join(name, d1.Name())
|
||||||
|
if err := walkDir(fs, name1, d1, walkDirFn); err != nil {
|
||||||
|
if err == SkipDir {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
314
internal/ufs/walk_unix.go
Normal file
314
internal/ufs/walk_unix.go
Normal file
@ -0,0 +1,314 @@
|
|||||||
|
// SPDX-License-Identifier: BSD-2-Clause
|
||||||
|
|
||||||
|
// Some code in this file was derived from https://github.com/karrick/godirwalk.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
iofs "io/fs"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
type WalkDiratFunc func(dirfd int, name, relative string, d DirEntry, err error) error
|
||||||
|
|
||||||
|
func (fs *UnixFS) WalkDirat(dirfd int, name string, fn WalkDiratFunc) error {
|
||||||
|
info, err := fs.Lstatat(dirfd, name)
|
||||||
|
if err != nil {
|
||||||
|
err = fn(dirfd, name, ".", nil, err)
|
||||||
|
} else {
|
||||||
|
b := newScratchBuffer()
|
||||||
|
err = fs.walkDir(b, dirfd, name, ".", iofs.FileInfoToDirEntry(info), fn)
|
||||||
|
}
|
||||||
|
if err == SkipDir || err == SkipAll {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) walkDir(b []byte, parentfd int, name, relative string, d DirEntry, walkDirFn WalkDiratFunc) error {
|
||||||
|
if err := walkDirFn(parentfd, name, relative, d, nil); err != nil || !d.IsDir() {
|
||||||
|
if err == SkipDir && d.IsDir() {
|
||||||
|
// Successfully skipped directory.
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dirfd, err := fs.openat(parentfd, name, O_DIRECTORY|O_RDONLY, 0)
|
||||||
|
if dirfd != 0 {
|
||||||
|
defer unix.Close(dirfd)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dirs, err := fs.readDir(dirfd, name, relative, b)
|
||||||
|
if err != nil {
|
||||||
|
// Second call, to report ReadDir error.
|
||||||
|
err = walkDirFn(dirfd, name, relative, d, err)
|
||||||
|
if err != nil {
|
||||||
|
if err == SkipDir && d.IsDir() {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, d1 := range dirs {
|
||||||
|
name := d1.Name()
|
||||||
|
// This fancy logic ensures that if we start walking from a subdirectory
|
||||||
|
// that we don't make the path relative to the root of the filesystem.
|
||||||
|
//
|
||||||
|
// For example, if we walk from the root of a filesystem, relative would
|
||||||
|
// be "." and path.Join would end up just returning name. But if relative
|
||||||
|
// was a subdirectory, relative could be "dir" and path.Join would make
|
||||||
|
// it "dir/child" even though we are walking starting at dir.
|
||||||
|
var rel string
|
||||||
|
if relative == "." {
|
||||||
|
rel = name
|
||||||
|
} else {
|
||||||
|
rel = path.Join(relative, name)
|
||||||
|
}
|
||||||
|
if err := fs.walkDir(b, dirfd, name, rel, d1, walkDirFn); err != nil {
|
||||||
|
if err == SkipDir {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDirMap .
|
||||||
|
// TODO: document
|
||||||
|
func ReadDirMap[T any](fs *UnixFS, path string, fn func(DirEntry) (T, error)) ([]T, error) {
|
||||||
|
dirfd, name, closeFd, err := fs.safePath(path)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fd, err := fs.openat(dirfd, name, O_DIRECTORY|O_RDONLY, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer unix.Close(fd)
|
||||||
|
|
||||||
|
entries, err := fs.readDir(fd, ".", path, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]T, len(entries))
|
||||||
|
for i, e := range entries {
|
||||||
|
idx := i
|
||||||
|
e := e
|
||||||
|
v, err := fn(e)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out[idx] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// nameOffset is a compile time constant
|
||||||
|
const nameOffset = int(unsafe.Offsetof(unix.Dirent{}.Name))
|
||||||
|
|
||||||
|
func nameFromDirent(de *unix.Dirent) (name []byte) {
|
||||||
|
// Because this GOOS' syscall.Dirent does not provide a field that specifies
|
||||||
|
// the name length, this function must first calculate the max possible name
|
||||||
|
// length, and then search for the NULL byte.
|
||||||
|
ml := int(de.Reclen) - nameOffset
|
||||||
|
|
||||||
|
// Convert syscall.Dirent.Name, which is array of int8, to []byte, by
|
||||||
|
// overwriting Cap, Len, and Data slice header fields to the max possible
|
||||||
|
// name length computed above, and finding the terminating NULL byte.
|
||||||
|
//
|
||||||
|
// TODO: is there an alternative to the deprecated SliceHeader?
|
||||||
|
// SliceHeader was mainly deprecated due to it being misused for avoiding
|
||||||
|
// allocations when converting a byte slice to a string, ref;
|
||||||
|
// https://go.dev/issue/53003
|
||||||
|
sh := (*reflect.SliceHeader)(unsafe.Pointer(&name))
|
||||||
|
sh.Cap = ml
|
||||||
|
sh.Len = ml
|
||||||
|
sh.Data = uintptr(unsafe.Pointer(&de.Name[0]))
|
||||||
|
|
||||||
|
if index := bytes.IndexByte(name, 0); index >= 0 {
|
||||||
|
// Found NULL byte; set slice's cap and len accordingly.
|
||||||
|
sh.Cap = index
|
||||||
|
sh.Len = index
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// NOTE: This branch is not expected, but included for defensive
|
||||||
|
// programming, and provides a hard stop on the name based on the structure
|
||||||
|
// field array size.
|
||||||
|
sh.Cap = len(de.Name)
|
||||||
|
sh.Len = sh.Cap
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// modeTypeFromDirent converts a syscall defined constant, which is in purview
|
||||||
|
// of OS, to a constant defined by Go, assumed by this project to be stable.
|
||||||
|
//
|
||||||
|
// When the syscall constant is not recognized, this function falls back to a
|
||||||
|
// Stat on the file system.
|
||||||
|
func (fs *UnixFS) modeTypeFromDirent(de *unix.Dirent, fd int, name string) (FileMode, error) {
|
||||||
|
switch de.Type {
|
||||||
|
case unix.DT_REG:
|
||||||
|
return 0, nil
|
||||||
|
case unix.DT_DIR:
|
||||||
|
return ModeDir, nil
|
||||||
|
case unix.DT_LNK:
|
||||||
|
return ModeSymlink, nil
|
||||||
|
case unix.DT_CHR:
|
||||||
|
return ModeDevice | ModeCharDevice, nil
|
||||||
|
case unix.DT_BLK:
|
||||||
|
return ModeDevice, nil
|
||||||
|
case unix.DT_FIFO:
|
||||||
|
return ModeNamedPipe, nil
|
||||||
|
case unix.DT_SOCK:
|
||||||
|
return ModeSocket, nil
|
||||||
|
default:
|
||||||
|
// If syscall returned unknown type (e.g., DT_UNKNOWN, DT_WHT), then
|
||||||
|
// resolve actual mode by reading file information.
|
||||||
|
return fs.modeType(fd, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// modeType returns the mode type of the file system entry identified by
|
||||||
|
// osPathname by calling os.LStat function, to intentionally not follow symbolic
|
||||||
|
// links.
|
||||||
|
//
|
||||||
|
// Even though os.LStat provides all file mode bits, we want to ensure same
|
||||||
|
// values returned to caller regardless of whether we obtained file mode bits
|
||||||
|
// from syscall or stat call. Therefore, mask out the additional file mode bits
|
||||||
|
// that are provided by stat but not by the syscall, so users can rely on their
|
||||||
|
// values.
|
||||||
|
func (fs *UnixFS) modeType(dirfd int, name string) (FileMode, error) {
|
||||||
|
fi, err := fs.Lstatat(dirfd, name)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("ufs: error finding mode type for %s during readDir: %w", name, err)
|
||||||
|
}
|
||||||
|
return fi.Mode() & ModeType, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var minimumScratchBufferSize = os.Getpagesize()
|
||||||
|
|
||||||
|
func newScratchBuffer() []byte {
|
||||||
|
return make([]byte, minimumScratchBufferSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) readDir(fd int, name, relative string, b []byte) ([]DirEntry, error) {
|
||||||
|
scratchBuffer := b
|
||||||
|
if scratchBuffer == nil || len(scratchBuffer) < minimumScratchBufferSize {
|
||||||
|
scratchBuffer = newScratchBuffer()
|
||||||
|
}
|
||||||
|
|
||||||
|
var entries []DirEntry
|
||||||
|
var workBuffer []byte
|
||||||
|
|
||||||
|
var sde unix.Dirent
|
||||||
|
for {
|
||||||
|
if len(workBuffer) == 0 {
|
||||||
|
n, err := unix.Getdents(fd, scratchBuffer)
|
||||||
|
if err != nil {
|
||||||
|
if err == unix.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nil, ensurePathError(err, "getdents", name)
|
||||||
|
}
|
||||||
|
if n <= 0 {
|
||||||
|
// end of directory: normal exit
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
workBuffer = scratchBuffer[:n] // trim work buffer to number of bytes read
|
||||||
|
}
|
||||||
|
|
||||||
|
// "Go is like C, except that you just put `unsafe` all over the place".
|
||||||
|
copy((*[unsafe.Sizeof(unix.Dirent{})]byte)(unsafe.Pointer(&sde))[:], workBuffer)
|
||||||
|
workBuffer = workBuffer[sde.Reclen:] // advance buffer for next iteration through loop
|
||||||
|
|
||||||
|
if sde.Ino == 0 {
|
||||||
|
continue // inode set to 0 indicates an entry that was marked as deleted
|
||||||
|
}
|
||||||
|
|
||||||
|
nameSlice := nameFromDirent(&sde)
|
||||||
|
nameLength := len(nameSlice)
|
||||||
|
|
||||||
|
if nameLength == 0 || (nameSlice[0] == '.' && (nameLength == 1 || (nameLength == 2 && nameSlice[1] == '.'))) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
childName := string(nameSlice)
|
||||||
|
mt, err := fs.modeTypeFromDirent(&sde, fd, childName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var rel string
|
||||||
|
if relative == "." {
|
||||||
|
rel = name
|
||||||
|
} else {
|
||||||
|
rel = path.Join(relative, childName)
|
||||||
|
}
|
||||||
|
entries = append(entries, &dirent{dirfd: fd, name: childName, path: rel, modeType: mt, fs: fs})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dirent stores the name and file system mode type of discovered file system
|
||||||
|
// entries.
|
||||||
|
type dirent struct {
|
||||||
|
dirfd int
|
||||||
|
name string
|
||||||
|
path string
|
||||||
|
modeType FileMode
|
||||||
|
|
||||||
|
fs *UnixFS
|
||||||
|
}
|
||||||
|
|
||||||
|
func (de dirent) Name() string {
|
||||||
|
return de.name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (de dirent) IsDir() bool {
|
||||||
|
return de.modeType&ModeDir != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (de dirent) Type() FileMode {
|
||||||
|
return de.modeType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (de dirent) Info() (FileInfo, error) {
|
||||||
|
if de.fs == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return de.fs.Lstatat(de.dirfd, de.name)
|
||||||
|
// return de.fs.Lstat(de.path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (de dirent) Open() (File, error) {
|
||||||
|
if de.fs == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return de.fs.OpenFileat(de.dirfd, de.name, O_RDONLY, 0)
|
||||||
|
// return de.fs.OpenFile(de.path, O_RDONLY, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// reset releases memory held by entry err and name, and resets mode type to 0.
|
||||||
|
func (de *dirent) reset() {
|
||||||
|
de.name = ""
|
||||||
|
de.path = ""
|
||||||
|
de.modeType = 0
|
||||||
|
de.dirfd = 0
|
||||||
|
}
|
||||||
@ -2,8 +2,6 @@ package parser
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -29,24 +27,14 @@ var configMatchRegex = regexp.MustCompile(`{{\s?config\.([\w.-]+)\s?}}`)
|
|||||||
// matching:
|
// matching:
|
||||||
//
|
//
|
||||||
// <Root>
|
// <Root>
|
||||||
// <Property value="testing"/>
|
//
|
||||||
|
// <Property value="testing"/>
|
||||||
|
//
|
||||||
// </Root>
|
// </Root>
|
||||||
//
|
//
|
||||||
// noinspection RegExpRedundantEscape
|
// noinspection RegExpRedundantEscape
|
||||||
var xmlValueMatchRegex = regexp.MustCompile(`^\[([\w]+)='(.*)'\]$`)
|
var xmlValueMatchRegex = regexp.MustCompile(`^\[([\w]+)='(.*)'\]$`)
|
||||||
|
|
||||||
// Gets the []byte representation of a configuration file to be passed through to other
|
|
||||||
// handler functions. If the file does not currently exist, it will be created.
|
|
||||||
func readFileBytes(path string) ([]byte, error) {
|
|
||||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
return io.ReadAll(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets the value of a key based on the value type defined.
|
// Gets the value of a key based on the value type defined.
|
||||||
func (cfr *ConfigurationFileReplacement) getKeyValue(value string) interface{} {
|
func (cfr *ConfigurationFileReplacement) getKeyValue(value string) interface{} {
|
||||||
if cfr.ReplaceWith.Type() == jsonparser.Boolean {
|
if cfr.ReplaceWith.Type() == jsonparser.Boolean {
|
||||||
|
|||||||
254
parser/parser.go
254
parser/parser.go
@ -2,8 +2,9 @@ package parser
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"os"
|
"bytes"
|
||||||
"path/filepath"
|
"encoding/json"
|
||||||
|
"io"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
@ -11,13 +12,13 @@ import (
|
|||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/beevik/etree"
|
"github.com/beevik/etree"
|
||||||
"github.com/buger/jsonparser"
|
"github.com/buger/jsonparser"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
"github.com/icza/dyno"
|
"github.com/icza/dyno"
|
||||||
"github.com/magiconair/properties"
|
"github.com/magiconair/properties"
|
||||||
"gopkg.in/ini.v1"
|
"gopkg.in/ini.v1"
|
||||||
"gopkg.in/yaml.v3"
|
"gopkg.in/yaml.v3"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// The file parsing options that are available for a server configuration file.
|
// The file parsing options that are available for a server configuration file.
|
||||||
@ -74,6 +75,26 @@ func (cv *ReplaceValue) String() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cv *ReplaceValue) Bytes() []byte {
|
||||||
|
switch cv.Type() {
|
||||||
|
case jsonparser.String:
|
||||||
|
var stackbuf [64]byte
|
||||||
|
bU, err := jsonparser.Unescape(cv.value, stackbuf[:])
|
||||||
|
if err != nil {
|
||||||
|
panic(errors.Wrap(err, "parser: could not parse value"))
|
||||||
|
}
|
||||||
|
return bU
|
||||||
|
case jsonparser.Null:
|
||||||
|
return []byte("<nil>")
|
||||||
|
case jsonparser.Boolean:
|
||||||
|
return cv.value
|
||||||
|
case jsonparser.Number:
|
||||||
|
return cv.value
|
||||||
|
default:
|
||||||
|
return []byte("<invalid>")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type ConfigurationParser string
|
type ConfigurationParser string
|
||||||
|
|
||||||
func (cp ConfigurationParser) String() string {
|
func (cp ConfigurationParser) String() string {
|
||||||
@ -167,11 +188,12 @@ func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a given configuration file and updates all of the values within as defined
|
// Parse parses a given configuration file and updates all the values within
|
||||||
// in the API response from the Panel.
|
// as defined in the API response from the Panel.
|
||||||
func (f *ConfigurationFile) Parse(path string, internal bool) error {
|
func (f *ConfigurationFile) Parse(file ufs.File) error {
|
||||||
log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
|
// log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
|
||||||
|
|
||||||
|
// What the fuck is going on here?
|
||||||
if mb, err := json.Marshal(config.Get()); err != nil {
|
if mb, err := json.Marshal(config.Get()); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
@ -182,56 +204,24 @@ func (f *ConfigurationFile) Parse(path string, internal bool) error {
|
|||||||
|
|
||||||
switch f.Parser {
|
switch f.Parser {
|
||||||
case Properties:
|
case Properties:
|
||||||
err = f.parsePropertiesFile(path)
|
err = f.parsePropertiesFile(file)
|
||||||
break
|
|
||||||
case File:
|
case File:
|
||||||
err = f.parseTextFile(path)
|
err = f.parseTextFile(file)
|
||||||
break
|
|
||||||
case Yaml, "yml":
|
case Yaml, "yml":
|
||||||
err = f.parseYamlFile(path)
|
err = f.parseYamlFile(file)
|
||||||
break
|
|
||||||
case Json:
|
case Json:
|
||||||
err = f.parseJsonFile(path)
|
err = f.parseJsonFile(file)
|
||||||
break
|
|
||||||
case Ini:
|
case Ini:
|
||||||
err = f.parseIniFile(path)
|
err = f.parseIniFile(file)
|
||||||
break
|
|
||||||
case Xml:
|
case Xml:
|
||||||
err = f.parseXmlFile(path)
|
err = f.parseXmlFile(file)
|
||||||
break
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
// File doesn't exist, we tried creating it, and same error is returned? Pretty
|
|
||||||
// sure this pathway is impossible, but if not, abort here.
|
|
||||||
if internal {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
b := strings.TrimSuffix(path, filepath.Base(path))
|
|
||||||
if err := os.MkdirAll(b, 0o755); err != nil {
|
|
||||||
return errors.WithMessage(err, "failed to create base directory for missing configuration file")
|
|
||||||
} else {
|
|
||||||
if _, err := os.Create(path); err != nil {
|
|
||||||
return errors.WithMessage(err, "failed to create missing configuration file")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return f.Parse(path, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses an xml file.
|
// Parses an xml file.
|
||||||
func (f *ConfigurationFile) parseXmlFile(path string) error {
|
func (f *ConfigurationFile) parseXmlFile(file ufs.File) error {
|
||||||
doc := etree.NewDocument()
|
doc := etree.NewDocument()
|
||||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
if _, err := doc.ReadFrom(file); err != nil {
|
if _, err := doc.ReadFrom(file); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -291,41 +281,27 @@ func (f *ConfigurationFile) parseXmlFile(path string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If you don't truncate the file you'll end up duplicating the data in there (or just appending
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
// to the end of the file. We don't want to do that.
|
return err
|
||||||
|
}
|
||||||
if err := file.Truncate(0); err != nil {
|
if err := file.Truncate(0); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move the cursor to the start of the file to avoid weird spacing issues.
|
|
||||||
file.Seek(0, 0)
|
|
||||||
|
|
||||||
// Ensure the XML is indented properly.
|
// Ensure the XML is indented properly.
|
||||||
doc.Indent(2)
|
doc.Indent(2)
|
||||||
|
|
||||||
// Truncate the file before attempting to write the changes.
|
// Write the XML to the file.
|
||||||
if err := os.Truncate(path, 0); err != nil {
|
if _, err := doc.WriteTo(file); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
// Write the XML to the file.
|
|
||||||
_, err = doc.WriteTo(file)
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses an ini file.
|
// Parses an ini file.
|
||||||
func (f *ConfigurationFile) parseIniFile(path string) error {
|
func (f *ConfigurationFile) parseIniFile(file ufs.File) error {
|
||||||
// Ini package can't handle a non-existent file, so handle that automatically here
|
// Wrap the file in a NopCloser so the ini package doesn't close the file.
|
||||||
// by creating it if not exists. Then, immediately close the file since we will use
|
cfg, err := ini.Load(io.NopCloser(file))
|
||||||
// other methods to write the new contents.
|
|
||||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
file.Close()
|
|
||||||
|
|
||||||
cfg, err := ini.Load(path)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -388,14 +364,24 @@ func (f *ConfigurationFile) parseIniFile(path string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return cfg.SaveTo(path)
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := file.Truncate(0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := cfg.WriteTo(file); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a json file updating any matching key/value pairs. If a match is not found, the
|
// Parses a json file updating any matching key/value pairs. If a match is not found, the
|
||||||
// value is set regardless in the file. See the commentary in parseYamlFile for more details
|
// value is set regardless in the file. See the commentary in parseYamlFile for more details
|
||||||
// about what is happening during this process.
|
// about what is happening during this process.
|
||||||
func (f *ConfigurationFile) parseJsonFile(path string) error {
|
func (f *ConfigurationFile) parseJsonFile(file ufs.File) error {
|
||||||
b, err := readFileBytes(path)
|
b, err := io.ReadAll(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -405,14 +391,24 @@ func (f *ConfigurationFile) parseJsonFile(path string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
output := []byte(data.StringIndent("", " "))
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
return os.WriteFile(path, output, 0o644)
|
return err
|
||||||
|
}
|
||||||
|
if err := file.Truncate(0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the data to the file.
|
||||||
|
if _, err := io.Copy(file, bytes.NewReader(data.BytesIndent("", " "))); err != nil {
|
||||||
|
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a yaml file and updates any matching key/value pairs before persisting
|
// Parses a yaml file and updates any matching key/value pairs before persisting
|
||||||
// it back to the disk.
|
// it back to the disk.
|
||||||
func (f *ConfigurationFile) parseYamlFile(path string) error {
|
func (f *ConfigurationFile) parseYamlFile(file ufs.File) error {
|
||||||
b, err := readFileBytes(path)
|
b, err := io.ReadAll(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -443,35 +439,56 @@ func (f *ConfigurationFile) parseYamlFile(path string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return os.WriteFile(path, marshaled, 0o644)
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := file.Truncate(0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the data to the file.
|
||||||
|
if _, err := io.Copy(file, bytes.NewReader(marshaled)); err != nil {
|
||||||
|
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a text file using basic find and replace. This is a highly inefficient method of
|
// Parses a text file using basic find and replace. This is a highly inefficient method of
|
||||||
// scanning a file and performing a replacement. You should attempt to use anything other
|
// scanning a file and performing a replacement. You should attempt to use anything other
|
||||||
// than this function where possible.
|
// than this function where possible.
|
||||||
func (f *ConfigurationFile) parseTextFile(path string) error {
|
func (f *ConfigurationFile) parseTextFile(file ufs.File) error {
|
||||||
input, err := os.ReadFile(path)
|
b := bytes.NewBuffer(nil)
|
||||||
if err != nil {
|
s := bufio.NewScanner(file)
|
||||||
return err
|
var replaced bool
|
||||||
}
|
for s.Scan() {
|
||||||
|
line := s.Bytes()
|
||||||
lines := strings.Split(string(input), "\n")
|
replaced = false
|
||||||
for i, line := range lines {
|
|
||||||
for _, replace := range f.Replace {
|
for _, replace := range f.Replace {
|
||||||
// If this line doesn't match what we expect for the replacement, move on to the next
|
// If this line doesn't match what we expect for the replacement, move on to the next
|
||||||
// line. Otherwise, update the line to have the replacement value.
|
// line. Otherwise, update the line to have the replacement value.
|
||||||
if !strings.HasPrefix(line, replace.Match) {
|
if !bytes.HasPrefix(line, []byte(replace.Match)) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
b.Write(replace.ReplaceWith.Bytes())
|
||||||
lines[i] = replace.ReplaceWith.String()
|
replaced = true
|
||||||
}
|
}
|
||||||
|
if !replaced {
|
||||||
|
b.Write(line)
|
||||||
|
}
|
||||||
|
b.WriteByte('\n')
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.WriteFile(path, []byte(strings.Join(lines, "\n")), 0o644); err != nil {
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := file.Truncate(0); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Write the data to the file.
|
||||||
|
if _, err := io.Copy(file, b); err != nil {
|
||||||
|
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -501,31 +518,29 @@ func (f *ConfigurationFile) parseTextFile(path string) error {
|
|||||||
//
|
//
|
||||||
// @see https://github.com/pterodactyl/panel/issues/2308 (original)
|
// @see https://github.com/pterodactyl/panel/issues/2308 (original)
|
||||||
// @see https://github.com/pterodactyl/panel/issues/3009 ("bug" introduced as result)
|
// @see https://github.com/pterodactyl/panel/issues/3009 ("bug" introduced as result)
|
||||||
func (f *ConfigurationFile) parsePropertiesFile(path string) error {
|
func (f *ConfigurationFile) parsePropertiesFile(file ufs.File) error {
|
||||||
var s strings.Builder
|
b, err := io.ReadAll(file)
|
||||||
// Open the file and attempt to load any comments that currenty exist at the start
|
if err != nil {
|
||||||
// of the file. This is kind of a hack, but should work for a majority of users for
|
return err
|
||||||
// the time being.
|
|
||||||
if fd, err := os.Open(path); err != nil {
|
|
||||||
return errors.Wrap(err, "parser: could not open file for reading")
|
|
||||||
} else {
|
|
||||||
scanner := bufio.NewScanner(fd)
|
|
||||||
// Scan until we hit a line that is not a comment that actually has content
|
|
||||||
// on it. Keep appending the comments until that time.
|
|
||||||
for scanner.Scan() {
|
|
||||||
text := scanner.Text()
|
|
||||||
if len(text) > 0 && text[0] != '#' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s.WriteString(text + "\n")
|
|
||||||
}
|
|
||||||
_ = fd.Close()
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
return errors.WithStackIf(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
p, err := properties.LoadFile(path, properties.UTF8)
|
s := bytes.NewBuffer(nil)
|
||||||
|
scanner := bufio.NewScanner(bytes.NewReader(b))
|
||||||
|
// Scan until we hit a line that is not a comment that actually has content
|
||||||
|
// on it. Keep appending the comments until that time.
|
||||||
|
for scanner.Scan() {
|
||||||
|
text := scanner.Bytes()
|
||||||
|
if len(text) > 0 && text[0] != '#' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s.Write(text)
|
||||||
|
s.WriteByte('\n')
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p, err := properties.Load(b, properties.UTF8)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "parser: could not load properties file for configuration update")
|
return errors.Wrap(err, "parser: could not load properties file for configuration update")
|
||||||
}
|
}
|
||||||
@ -563,17 +578,16 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
|
|||||||
s.WriteString(key + "=" + strings.Trim(strconv.QuoteToASCII(value), "\"") + "\n")
|
s.WriteString(key + "=" + strings.Trim(strconv.QuoteToASCII(value), "\"") + "\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Open the file for writing.
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer w.Close()
|
if err := file.Truncate(0); err != nil {
|
||||||
|
return err
|
||||||
// Write the data to the file.
|
|
||||||
if _, err := w.Write([]byte(s.String())); err != nil {
|
|
||||||
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Write the data to the file.
|
||||||
|
if _, err := io.Copy(file, s); err != nil {
|
||||||
|
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -3,6 +3,7 @@ package remote
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
@ -15,7 +16,6 @@ import (
|
|||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/cenkalti/backoff/v4"
|
"github.com/cenkalti/backoff/v4"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
)
|
)
|
||||||
|
|||||||
@ -2,11 +2,11 @@ package remote
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/parser"
|
"github.com/pterodactyl/wings/parser"
|
||||||
)
|
)
|
||||||
|
|||||||
@ -2,6 +2,7 @@ package downloader
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"mime"
|
"mime"
|
||||||
@ -14,26 +15,63 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/server"
|
"github.com/pterodactyl/wings/server"
|
||||||
)
|
)
|
||||||
|
|
||||||
var client = &http.Client{
|
var client *http.Client
|
||||||
Timeout: time.Hour * 12,
|
|
||||||
// Disallow any redirect on an HTTP call. This is a security requirement: do not modify
|
func init() {
|
||||||
// this logic without first ensuring that the new target location IS NOT within the current
|
dialer := &net.Dialer{
|
||||||
// instance's local network.
|
LocalAddr: nil,
|
||||||
//
|
}
|
||||||
// This specific error response just causes the client to not follow the redirect and
|
|
||||||
// returns the actual redirect response to the caller. Not perfect, but simple and most
|
trnspt := http.DefaultTransport.(*http.Transport).Clone()
|
||||||
// people won't be using URLs that redirect anyways hopefully?
|
trnspt.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||||
//
|
c, err := dialer.DialContext(ctx, network, addr)
|
||||||
// We'll re-evaluate this down the road if needed.
|
if err != nil {
|
||||||
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
return nil, errors.WithStack(err)
|
||||||
return http.ErrUseLastResponse
|
}
|
||||||
},
|
|
||||||
|
ipStr, _, err := net.SplitHostPort(c.RemoteAddr().String())
|
||||||
|
if err != nil {
|
||||||
|
return c, errors.WithStack(err)
|
||||||
|
}
|
||||||
|
ip := net.ParseIP(ipStr)
|
||||||
|
if ip == nil {
|
||||||
|
return c, errors.WithStack(ErrInvalidIPAddress)
|
||||||
|
}
|
||||||
|
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
|
||||||
|
return c, errors.WithStack(ErrInternalResolution)
|
||||||
|
}
|
||||||
|
for _, block := range internalRanges {
|
||||||
|
if !block.Contains(ip) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return c, errors.WithStack(ErrInternalResolution)
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
client = &http.Client{
|
||||||
|
Timeout: time.Hour * 12,
|
||||||
|
|
||||||
|
Transport: trnspt,
|
||||||
|
|
||||||
|
// Disallow any redirect on an HTTP call. This is a security requirement: do not modify
|
||||||
|
// this logic without first ensuring that the new target location IS NOT within the current
|
||||||
|
// instance's local network.
|
||||||
|
//
|
||||||
|
// This specific error response just causes the client to not follow the redirect and
|
||||||
|
// returns the actual redirect response to the caller. Not perfect, but simple and most
|
||||||
|
// people won't be using URLs that redirect anyways hopefully?
|
||||||
|
//
|
||||||
|
// We'll re-evaluate this down the road if needed.
|
||||||
|
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||||
|
return http.ErrUseLastResponse
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var instance = &Downloader{
|
var instance = &Downloader{
|
||||||
@ -143,12 +181,6 @@ func (dl *Download) Execute() error {
|
|||||||
dl.cancelFunc = &cancel
|
dl.cancelFunc = &cancel
|
||||||
defer dl.Cancel()
|
defer dl.Cancel()
|
||||||
|
|
||||||
// Always ensure that we're checking the destination for the download to avoid a malicious
|
|
||||||
// user from accessing internal network resources.
|
|
||||||
if err := dl.isExternalNetwork(ctx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point we have verified the destination is not within the local network, so we can
|
// At this point we have verified the destination is not within the local network, so we can
|
||||||
// now make a request to that URL and pull down the file, saving it to the server's data
|
// now make a request to that URL and pull down the file, saving it to the server's data
|
||||||
// directory.
|
// directory.
|
||||||
@ -167,13 +199,8 @@ func (dl *Download) Execute() error {
|
|||||||
return errors.New("downloader: got bad response status from endpoint: " + res.Status)
|
return errors.New("downloader: got bad response status from endpoint: " + res.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there is a Content-Length header on this request go ahead and check that we can
|
if res.ContentLength < 1 {
|
||||||
// even write the whole file before beginning this process. If there is no header present
|
return errors.New("downloader: request is missing ContentLength")
|
||||||
// we'll just have to give it a spin and see how it goes.
|
|
||||||
if res.ContentLength > 0 {
|
|
||||||
if err := dl.server.Filesystem().HasSpaceFor(res.ContentLength); err != nil {
|
|
||||||
return errors.WrapIf(err, "downloader: failed to write file: not enough space")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if dl.req.UseHeader {
|
if dl.req.UseHeader {
|
||||||
@ -200,8 +227,10 @@ func (dl *Download) Execute() error {
|
|||||||
p := dl.Path()
|
p := dl.Path()
|
||||||
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
|
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
|
||||||
|
|
||||||
|
// Write the file while tracking the progress, Write will check that the
|
||||||
|
// size of the file won't exceed the disk limit.
|
||||||
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
|
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
|
||||||
if err := dl.server.Filesystem().Writefile(p, r); err != nil {
|
if err := dl.server.Filesystem().Write(p, r, res.ContentLength, 0o644); err != nil {
|
||||||
return errors.WrapIf(err, "downloader: failed to write file to server directory")
|
return errors.WrapIf(err, "downloader: failed to write file to server directory")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@ -246,59 +275,6 @@ func (dl *Download) counter(contentLength int64) *Counter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verifies that a given download resolves to a location not within the current local
|
|
||||||
// network for the machine. If the final destination of a resource is within the local
|
|
||||||
// network an ErrInternalResolution error is returned.
|
|
||||||
func (dl *Download) isExternalNetwork(ctx context.Context) error {
|
|
||||||
dialer := &net.Dialer{
|
|
||||||
LocalAddr: nil,
|
|
||||||
}
|
|
||||||
|
|
||||||
host := dl.req.URL.Host
|
|
||||||
|
|
||||||
// This cluster-fuck of math and integer shit converts an integer IP into a proper IPv4.
|
|
||||||
// For example: 16843009 would become 1.1.1.1
|
|
||||||
//if i, err := strconv.ParseInt(host, 10, 64); err == nil {
|
|
||||||
// host = strconv.FormatInt((i>>24)&0xFF, 10) + "." + strconv.FormatInt((i>>16)&0xFF, 10) + "." + strconv.FormatInt((i>>8)&0xFF, 10) + "." + strconv.FormatInt(i&0xFF, 10)
|
|
||||||
//}
|
|
||||||
|
|
||||||
if _, _, err := net.SplitHostPort(host); err != nil {
|
|
||||||
if !strings.Contains(err.Error(), "missing port in address") {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
switch dl.req.URL.Scheme {
|
|
||||||
case "http":
|
|
||||||
host += ":80"
|
|
||||||
case "https":
|
|
||||||
host += ":443"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c, err := dialer.DialContext(ctx, "tcp", host)
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
_ = c.Close()
|
|
||||||
|
|
||||||
ipStr, _, err := net.SplitHostPort(c.RemoteAddr().String())
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
ip := net.ParseIP(ipStr)
|
|
||||||
if ip == nil {
|
|
||||||
return errors.WithStack(ErrInvalidIPAddress)
|
|
||||||
}
|
|
||||||
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
|
|
||||||
return errors.WithStack(ErrInternalResolution)
|
|
||||||
}
|
|
||||||
for _, block := range internalRanges {
|
|
||||||
if block.Contains(ip) {
|
|
||||||
return errors.WithStack(ErrInternalResolution)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Downloader represents a global downloader that keeps track of all currently processing downloads
|
// Downloader represents a global downloader that keeps track of all currently processing downloads
|
||||||
// for the machine.
|
// for the machine.
|
||||||
type Downloader struct {
|
type Downloader struct {
|
||||||
|
|||||||
@ -168,7 +168,6 @@ func RequireAuthorization() gin.HandlerFunc {
|
|||||||
// We don't put this value outside this function since the node's authentication
|
// We don't put this value outside this function since the node's authentication
|
||||||
// token can be changed on the fly and the config.Get() call returns a copy, so
|
// token can be changed on the fly and the config.Get() call returns a copy, so
|
||||||
// if it is rotated this value will never properly get updated.
|
// if it is rotated this value will never properly get updated.
|
||||||
token := config.Get().AuthenticationToken
|
|
||||||
auth := strings.SplitN(c.GetHeader("Authorization"), " ", 2)
|
auth := strings.SplitN(c.GetHeader("Authorization"), " ", 2)
|
||||||
if len(auth) != 2 || auth[0] != "Bearer" {
|
if len(auth) != 2 || auth[0] != "Bearer" {
|
||||||
c.Header("WWW-Authenticate", "Bearer")
|
c.Header("WWW-Authenticate", "Bearer")
|
||||||
@ -179,7 +178,7 @@ func RequireAuthorization() gin.HandlerFunc {
|
|||||||
// All requests to Wings must be authorized with the authentication token present in
|
// All requests to Wings must be authorized with the authentication token present in
|
||||||
// the Wings configuration file. Remeber, all requests to Wings come from the Panel
|
// the Wings configuration file. Remeber, all requests to Wings come from the Panel
|
||||||
// backend, or using a signed JWT for temporary authentication.
|
// backend, or using a signed JWT for temporary authentication.
|
||||||
if subtle.ConstantTimeCompare([]byte(auth[1]), []byte(token)) != 1 {
|
if subtle.ConstantTimeCompare([]byte(auth[1]), []byte(config.Get().Token.Token)) != 1 {
|
||||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "You are not authorized to access this endpoint."})
|
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "You are not authorized to access this endpoint."})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@ -8,6 +8,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/router/middleware"
|
"github.com/pterodactyl/wings/router/middleware"
|
||||||
"github.com/pterodactyl/wings/router/tokens"
|
"github.com/pterodactyl/wings/router/tokens"
|
||||||
@ -19,12 +20,14 @@ func getDownloadBackup(c *gin.Context) {
|
|||||||
client := middleware.ExtractApiClient(c)
|
client := middleware.ExtractApiClient(c)
|
||||||
manager := middleware.ExtractManager(c)
|
manager := middleware.ExtractManager(c)
|
||||||
|
|
||||||
|
// Get the payload from the token.
|
||||||
token := tokens.BackupPayload{}
|
token := tokens.BackupPayload{}
|
||||||
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
|
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get the server using the UUID from the token.
|
||||||
if _, ok := manager.Get(token.ServerUuid); !ok || !token.IsUniqueRequest() {
|
if _, ok := manager.Get(token.ServerUuid); !ok || !token.IsUniqueRequest() {
|
||||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||||
"error": "The requested resource was not found on this server.",
|
"error": "The requested resource was not found on this server.",
|
||||||
@ -32,6 +35,14 @@ func getDownloadBackup(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Validate that the BackupUuid field is actually a UUID and not some random characters or a
|
||||||
|
// file path.
|
||||||
|
if _, err := uuid.Parse(token.BackupUuid); err != nil {
|
||||||
|
middleware.CaptureAndAbort(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Locate the backup on the local disk.
|
||||||
b, st, err := backup.LocateLocal(client, token.BackupUuid)
|
b, st, err := backup.LocateLocal(client, token.BackupUuid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
@ -45,6 +56,8 @@ func getDownloadBackup(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The use of `os` here is safe as backups are not stored within server
|
||||||
|
// accessible directories.
|
||||||
f, err := os.Open(b.Path())
|
f, err := os.Open(b.Path())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
@ -76,26 +89,18 @@ func getDownloadFile(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p, _ := s.Filesystem().SafePath(token.FilePath)
|
f, st, err := s.Filesystem().File(token.FilePath)
|
||||||
st, err := os.Stat(p)
|
|
||||||
// If there is an error or we're somehow trying to download a directory, just
|
|
||||||
// respond with the appropriate error.
|
|
||||||
if err != nil {
|
|
||||||
middleware.CaptureAndAbort(c, err)
|
|
||||||
return
|
|
||||||
} else if st.IsDir() {
|
|
||||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
|
||||||
"error": "The requested resource was not found on this server.",
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Open(p)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
if st.IsDir() {
|
||||||
|
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||||
|
"error": "The requested resource was not found on this server.",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
||||||
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
|
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
|
||||||
|
|||||||
@ -227,19 +227,19 @@ func deleteServer(c *gin.Context) {
|
|||||||
//
|
//
|
||||||
// In addition, servers with large amounts of files can take some time to finish deleting,
|
// In addition, servers with large amounts of files can take some time to finish deleting,
|
||||||
// so we don't want to block the HTTP call while waiting on this.
|
// so we don't want to block the HTTP call while waiting on this.
|
||||||
go func(p string) {
|
go func(s *server.Server) {
|
||||||
|
fs := s.Filesystem()
|
||||||
|
p := fs.Path()
|
||||||
|
_ = fs.UnixFS().Close()
|
||||||
if err := os.RemoveAll(p); err != nil {
|
if err := os.RemoveAll(p); err != nil {
|
||||||
log.WithFields(log.Fields{"path": p, "error": err}).Warn("failed to remove server files during deletion process")
|
log.WithFields(log.Fields{"path": p, "error": err}).Warn("failed to remove server files during deletion process")
|
||||||
}
|
}
|
||||||
}(s.Filesystem().Path())
|
}(s)
|
||||||
|
|
||||||
middleware.ExtractManager(c).Remove(func(server *server.Server) bool {
|
middleware.ExtractManager(c).Remove(func(server *server.Server) bool {
|
||||||
return server.ID() == s.ID()
|
return server.ID() == s.ID()
|
||||||
})
|
})
|
||||||
|
|
||||||
// Deallocate the reference to this server.
|
|
||||||
s = nil
|
|
||||||
|
|
||||||
c.Status(http.StatusNoContent)
|
c.Status(http.StatusNoContent)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -30,7 +30,7 @@ import (
|
|||||||
// getServerFileContents returns the contents of a file on the server.
|
// getServerFileContents returns the contents of a file on the server.
|
||||||
func getServerFileContents(c *gin.Context) {
|
func getServerFileContents(c *gin.Context) {
|
||||||
s := middleware.ExtractServer(c)
|
s := middleware.ExtractServer(c)
|
||||||
p := "/" + strings.TrimLeft(c.Query("file"), "/")
|
p := strings.TrimLeft(c.Query("file"), "/")
|
||||||
f, st, err := s.Filesystem().File(p)
|
f, st, err := s.Filesystem().File(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
@ -129,7 +129,6 @@ func putServerRenameFiles(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
if err := fs.Rename(pf, pt); err != nil {
|
if err := fs.Rename(pf, pt); err != nil {
|
||||||
// Return nil if the error is an is not exists.
|
// Return nil if the error is an is not exists.
|
||||||
// NOTE: os.IsNotExist() does not work if the error is wrapped.
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
s.Log().WithField("error", err).
|
s.Log().WithField("error", err).
|
||||||
WithField("from_path", pf).
|
WithField("from_path", pf).
|
||||||
@ -239,7 +238,16 @@ func postServerWriteFile(c *gin.Context) {
|
|||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil {
|
|
||||||
|
// A content length of -1 means the actual length is unknown.
|
||||||
|
if c.Request.ContentLength == -1 {
|
||||||
|
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||||
|
"error": "Missing Content-Length",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.Filesystem().Write(f, c.Request.Body, c.Request.ContentLength, 0o644); err != nil {
|
||||||
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
|
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
|
||||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||||
"error": "Cannot write file, name conflicts with an existing directory by the same name.",
|
"error": "Cannot write file, name conflicts with an existing directory by the same name.",
|
||||||
@ -589,15 +597,9 @@ func postServerUploadFiles(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, header := range headers {
|
for _, header := range headers {
|
||||||
p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename))
|
|
||||||
if err != nil {
|
|
||||||
middleware.CaptureAndAbort(c, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// We run this in a different method so I can use defer without any of
|
// We run this in a different method so I can use defer without any of
|
||||||
// the consequences caused by calling it in a loop.
|
// the consequences caused by calling it in a loop.
|
||||||
if err := handleFileUpload(p, s, header); err != nil {
|
if err := handleFileUpload(filepath.Join(directory, header.Filename), s, header); err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
return
|
return
|
||||||
} else {
|
} else {
|
||||||
@ -619,7 +621,8 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader)
|
|||||||
if err := s.Filesystem().IsIgnored(p); err != nil {
|
if err := s.Filesystem().IsIgnored(p); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := s.Filesystem().Writefile(p, file); err != nil {
|
|
||||||
|
if err := s.Filesystem().Write(p, file, header.Size, 0o644); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@ -2,10 +2,10 @@ package router
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
ws "github.com/gorilla/websocket"
|
ws "github.com/gorilla/websocket"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/router/middleware"
|
"github.com/pterodactyl/wings/router/middleware"
|
||||||
|
|||||||
@ -106,6 +106,7 @@ func postTransfers(c *gin.Context) {
|
|||||||
if !successful && err != nil {
|
if !successful && err != nil {
|
||||||
// Delete all extracted files.
|
// Delete all extracted files.
|
||||||
go func(trnsfr *transfer.Transfer) {
|
go func(trnsfr *transfer.Transfer) {
|
||||||
|
_ = trnsfr.Server.Filesystem().UnixFS().Close()
|
||||||
if err := os.RemoveAll(trnsfr.Server.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
|
if err := os.RemoveAll(trnsfr.Server.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
|
||||||
trnsfr.Log().WithError(err).Warn("failed to delete local server files")
|
trnsfr.Log().WithError(err).Warn("failed to delete local server files")
|
||||||
}
|
}
|
||||||
|
|||||||
@ -2,11 +2,11 @@ package websocket
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/events"
|
"github.com/pterodactyl/wings/events"
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
|
|||||||
@ -2,6 +2,7 @@ package websocket
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
@ -14,7 +15,6 @@ import (
|
|||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/gbrlsnchs/jwt/v3"
|
"github.com/gbrlsnchs/jwt/v3"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
|
|
||||||
|
|||||||
@ -67,7 +67,7 @@ func (s *Server) Backup(b backup.BackupInterface) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ad, err := b.Generate(s.Context(), s.Filesystem().Path(), ignored)
|
ad, err := b.Generate(s.Context(), s.Filesystem(), ignored)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); err != nil {
|
if err := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); err != nil {
|
||||||
s.Log().WithFields(log.Fields{
|
s.Log().WithFields(log.Fields{
|
||||||
@ -154,17 +154,14 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
|
|||||||
err = b.Restore(s.Context(), reader, func(file string, info fs.FileInfo, r io.ReadCloser) error {
|
err = b.Restore(s.Context(), reader, func(file string, info fs.FileInfo, r io.ReadCloser) error {
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
|
s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
|
||||||
|
// TODO: since this will be called a lot, it may be worth adding an optimized
|
||||||
if err := s.Filesystem().Writefile(file, r); err != nil {
|
// Write with Chtimes method to the UnixFS that is able to re-use the
|
||||||
|
// same dirfd and file name.
|
||||||
|
if err := s.Filesystem().Write(file, r, info.Size(), info.Mode()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := s.Filesystem().Chmod(file, info.Mode()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
atime := info.ModTime()
|
atime := info.ModTime()
|
||||||
mtime := atime
|
return s.Filesystem().Chtimes(file, atime, atime)
|
||||||
return s.Filesystem().Chtimes(file, atime, mtime)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
return errors.WithStackIf(err)
|
return errors.WithStackIf(err)
|
||||||
|
|||||||
@ -11,16 +11,18 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/mholt/archiver/v4"
|
"github.com/mholt/archives"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
|
"github.com/pterodactyl/wings/server/filesystem"
|
||||||
)
|
)
|
||||||
|
|
||||||
var format = archiver.CompressedArchive{
|
var format = archives.CompressedArchive{
|
||||||
Compression: archiver.Gz{},
|
Compression: archives.Gz{},
|
||||||
Archival: archiver.Tar{},
|
Archival: archives.Tar{},
|
||||||
|
Extraction: archives.Tar{},
|
||||||
}
|
}
|
||||||
|
|
||||||
type AdapterType string
|
type AdapterType string
|
||||||
@ -46,7 +48,7 @@ type BackupInterface interface {
|
|||||||
WithLogContext(map[string]interface{})
|
WithLogContext(map[string]interface{})
|
||||||
// Generate creates a backup in whatever the configured source for the
|
// Generate creates a backup in whatever the configured source for the
|
||||||
// specific implementation is.
|
// specific implementation is.
|
||||||
Generate(context.Context, string, string) (*ArchiveDetails, error)
|
Generate(context.Context, *filesystem.Filesystem, string) (*ArchiveDetails, error)
|
||||||
// Ignored returns the ignored files for this backup instance.
|
// Ignored returns the ignored files for this backup instance.
|
||||||
Ignored() string
|
Ignored() string
|
||||||
// Checksum returns a SHA1 checksum for the generated backup.
|
// Checksum returns a SHA1 checksum for the generated backup.
|
||||||
|
|||||||
@ -7,7 +7,7 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/juju/ratelimit"
|
"github.com/juju/ratelimit"
|
||||||
"github.com/mholt/archiver/v4"
|
"github.com/mholt/archives"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
@ -59,10 +59,10 @@ func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
|
|||||||
|
|
||||||
// Generate generates a backup of the selected files and pushes it to the
|
// Generate generates a backup of the selected files and pushes it to the
|
||||||
// defined location for this instance.
|
// defined location for this instance.
|
||||||
func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
|
func (b *LocalBackup) Generate(ctx context.Context, fsys *filesystem.Filesystem, ignore string) (*ArchiveDetails, error) {
|
||||||
a := &filesystem.Archive{
|
a := &filesystem.Archive{
|
||||||
BasePath: basePath,
|
Filesystem: fsys,
|
||||||
Ignore: ignore,
|
Ignore: ignore,
|
||||||
}
|
}
|
||||||
|
|
||||||
b.log().WithField("path", b.Path()).Info("creating backup for server")
|
b.log().WithField("path", b.Path()).Info("creating backup for server")
|
||||||
@ -93,14 +93,14 @@ func (b *LocalBackup) Restore(ctx context.Context, _ io.Reader, callback Restore
|
|||||||
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
|
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
|
||||||
reader = ratelimit.Reader(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
|
reader = ratelimit.Reader(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
|
||||||
}
|
}
|
||||||
if err := format.Extract(ctx, reader, nil, func(ctx context.Context, f archiver.File) error {
|
if err := format.Extract(ctx, reader, func(ctx context.Context, f archives.FileInfo) error {
|
||||||
r, err := f.Open()
|
r, err := f.Open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
|
|
||||||
return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
|
return callback(f.NameInArchive, f.FileInfo, r)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@ -12,7 +12,7 @@ import (
|
|||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/cenkalti/backoff/v4"
|
"github.com/cenkalti/backoff/v4"
|
||||||
"github.com/juju/ratelimit"
|
"github.com/juju/ratelimit"
|
||||||
"github.com/mholt/archiver/v4"
|
"github.com/mholt/archives"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
@ -48,12 +48,12 @@ func (s *S3Backup) WithLogContext(c map[string]interface{}) {
|
|||||||
|
|
||||||
// Generate creates a new backup on the disk, moves it into the S3 bucket via
|
// Generate creates a new backup on the disk, moves it into the S3 bucket via
|
||||||
// the provided presigned URL, and then deletes the backup from the disk.
|
// the provided presigned URL, and then deletes the backup from the disk.
|
||||||
func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
|
func (s *S3Backup) Generate(ctx context.Context, fsys *filesystem.Filesystem, ignore string) (*ArchiveDetails, error) {
|
||||||
defer s.Remove()
|
defer s.Remove()
|
||||||
|
|
||||||
a := &filesystem.Archive{
|
a := &filesystem.Archive{
|
||||||
BasePath: basePath,
|
Filesystem: fsys,
|
||||||
Ignore: ignore,
|
Ignore: ignore,
|
||||||
}
|
}
|
||||||
|
|
||||||
s.log().WithField("path", s.Path()).Info("creating backup for server")
|
s.log().WithField("path", s.Path()).Info("creating backup for server")
|
||||||
@ -93,14 +93,14 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
|
|||||||
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
|
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
|
||||||
reader = ratelimit.Reader(r, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
|
reader = ratelimit.Reader(r, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
|
||||||
}
|
}
|
||||||
if err := format.Extract(ctx, reader, nil, func(ctx context.Context, f archiver.File) error {
|
if err := format.Extract(ctx, reader, func(ctx context.Context, f archives.FileInfo) error {
|
||||||
r, err := f.Open()
|
r, err := f.Open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
|
|
||||||
return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
|
return callback(f.NameInArchive, f.FileInfo, r)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -231,7 +231,6 @@ func (fu *s3FileUploader) uploadPart(ctx context.Context, part string, size int6
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}, fu.backoff(ctx))
|
}, fu.backoff(ctx))
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if v, ok := err.(*backoff.PermanentError); ok {
|
if v, ok := err.(*backoff.PermanentError); ok {
|
||||||
return "", v.Unwrap()
|
return "", v.Unwrap()
|
||||||
|
|||||||
@ -4,9 +4,11 @@ import (
|
|||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
"github.com/gammazero/workerpool"
|
"github.com/gammazero/workerpool"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// UpdateConfigurationFiles updates all of the defined configuration files for
|
// UpdateConfigurationFiles updates all the defined configuration files for
|
||||||
// a server automatically to ensure that they always use the specified values.
|
// a server automatically to ensure that they always use the specified values.
|
||||||
func (s *Server) UpdateConfigurationFiles() {
|
func (s *Server) UpdateConfigurationFiles() {
|
||||||
pool := workerpool.New(runtime.NumCPU())
|
pool := workerpool.New(runtime.NumCPU())
|
||||||
@ -18,18 +20,18 @@ func (s *Server) UpdateConfigurationFiles() {
|
|||||||
f := cf
|
f := cf
|
||||||
|
|
||||||
pool.Submit(func() {
|
pool.Submit(func() {
|
||||||
p, err := s.Filesystem().SafePath(f.FileName)
|
file, err := s.Filesystem().UnixFS().Touch(f.FileName, ufs.O_RDWR|ufs.O_CREATE, 0o644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.Log().WithField("error", err).Error("failed to generate safe path for configuration file")
|
s.Log().WithField("file_name", f.FileName).WithField("error", err).Error("failed to open file for configuration")
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
if err := f.Parse(p, false); err != nil {
|
if err := f.Parse(file); err != nil {
|
||||||
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
|
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
|
||||||
}
|
}
|
||||||
|
|
||||||
s.Log().WithField("path", f.FileName).Debug("finished processing server configuration file")
|
s.Log().WithField("file_name", f.FileName).Debug("finished processing server configuration file")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -37,7 +37,7 @@ func (s *Server) Throttler() *ConsoleThrottle {
|
|||||||
|
|
||||||
s.throttler = newConsoleThrottle(throttles.Lines, period)
|
s.throttler = newConsoleThrottle(throttles.Lines, period)
|
||||||
s.throttler.strike = func() {
|
s.throttler.strike = func() {
|
||||||
s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Server is outputting console data too quickly -- throttling..."))
|
s.PublishConsoleOutputFromDaemon("Server is outputting console data too quickly -- throttling...")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
return s.throttler
|
return s.throttler
|
||||||
|
|||||||
@ -3,7 +3,6 @@ package filesystem
|
|||||||
import (
|
import (
|
||||||
"archive/tar"
|
"archive/tar"
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"os"
|
"os"
|
||||||
@ -14,12 +13,12 @@ import (
|
|||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/juju/ratelimit"
|
"github.com/juju/ratelimit"
|
||||||
"github.com/karrick/godirwalk"
|
|
||||||
"github.com/klauspost/pgzip"
|
"github.com/klauspost/pgzip"
|
||||||
ignore "github.com/sabhiram/go-gitignore"
|
ignore "github.com/sabhiram/go-gitignore"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/internal/progress"
|
"github.com/pterodactyl/wings/internal/progress"
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
const memory = 4 * 1024
|
const memory = 4 * 1024
|
||||||
@ -57,27 +56,35 @@ func (p *TarProgress) Write(v []byte) (int, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Archive struct {
|
type Archive struct {
|
||||||
// BasePath is the absolute path to create the archive from where Files and Ignore are
|
// Filesystem to create the archive with.
|
||||||
// relative to.
|
Filesystem *Filesystem
|
||||||
BasePath string
|
|
||||||
|
|
||||||
// Ignore is a gitignore string (most likely read from a file) of files to ignore
|
// Ignore is a gitignore string (most likely read from a file) of files to ignore
|
||||||
// from the archive.
|
// from the archive.
|
||||||
Ignore string
|
Ignore string
|
||||||
|
|
||||||
// Files specifies the files to archive, this takes priority over the Ignore option, if
|
// BaseDirectory .
|
||||||
// unspecified, all files in the BasePath will be archived unless Ignore is set.
|
BaseDirectory string
|
||||||
//
|
|
||||||
// All items in Files must be absolute within BasePath.
|
// Files specifies the files to archive, this takes priority over the Ignore
|
||||||
|
// option, if unspecified, all files in the BaseDirectory will be archived
|
||||||
|
// unless Ignore is set.
|
||||||
Files []string
|
Files []string
|
||||||
|
|
||||||
// Progress wraps the writer of the archive to pass through the progress tracker.
|
// Progress wraps the writer of the archive to pass through the progress tracker.
|
||||||
Progress *progress.Progress
|
Progress *progress.Progress
|
||||||
|
|
||||||
|
w *TarProgress
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create creates an archive at dst with all the files defined in the
|
// Create creates an archive at dst with all the files defined in the
|
||||||
// included Files array.
|
// included Files array.
|
||||||
|
//
|
||||||
|
// THIS IS UNSAFE TO USE IF `dst` IS PROVIDED BY A USER! ONLY USE THIS WITH
|
||||||
|
// CONTROLLED PATHS!
|
||||||
func (a *Archive) Create(ctx context.Context, dst string) error {
|
func (a *Archive) Create(ctx context.Context, dst string) error {
|
||||||
|
// Using os.OpenFile here is expected, as long as `dst` is not a user
|
||||||
|
// provided path.
|
||||||
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
|
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -98,14 +105,28 @@ func (a *Archive) Create(ctx context.Context, dst string) error {
|
|||||||
return a.Stream(ctx, writer)
|
return a.Stream(ctx, writer)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stream .
|
type walkFunc func(dirfd int, name, relative string, d ufs.DirEntry) error
|
||||||
func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
|
|
||||||
for _, f := range a.Files {
|
|
||||||
if strings.HasPrefix(f, a.BasePath) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("archive: all entries in Files must be absolute and within BasePath: %s\n", f)
|
// Stream streams the creation of the archive to the given writer.
|
||||||
|
func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
|
||||||
|
if a.Filesystem == nil {
|
||||||
|
return errors.New("filesystem: archive.Filesystem is unset")
|
||||||
|
}
|
||||||
|
|
||||||
|
// The base directory may come with a prefixed `/`, strip it to prevent
|
||||||
|
// problems.
|
||||||
|
a.BaseDirectory = strings.TrimPrefix(a.BaseDirectory, "/")
|
||||||
|
|
||||||
|
if filesLen := len(a.Files); filesLen > 0 {
|
||||||
|
files := make([]string, filesLen)
|
||||||
|
for i, f := range a.Files {
|
||||||
|
if !strings.HasPrefix(f, a.Filesystem.Path()) {
|
||||||
|
files[i] = f
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
files[i] = strings.TrimPrefix(strings.TrimPrefix(f, a.Filesystem.Path()), "/")
|
||||||
|
}
|
||||||
|
a.Files = files
|
||||||
}
|
}
|
||||||
|
|
||||||
// Choose which compression level to use based on the compression_level configuration option
|
// Choose which compression level to use based on the compression_level configuration option
|
||||||
@ -115,8 +136,6 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
|
|||||||
compressionLevel = pgzip.NoCompression
|
compressionLevel = pgzip.NoCompression
|
||||||
case "best_compression":
|
case "best_compression":
|
||||||
compressionLevel = pgzip.BestCompression
|
compressionLevel = pgzip.BestCompression
|
||||||
case "best_speed":
|
|
||||||
fallthrough
|
|
||||||
default:
|
default:
|
||||||
compressionLevel = pgzip.BestSpeed
|
compressionLevel = pgzip.BestSpeed
|
||||||
}
|
}
|
||||||
@ -130,107 +149,124 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
|
|||||||
tw := tar.NewWriter(gw)
|
tw := tar.NewWriter(gw)
|
||||||
defer tw.Close()
|
defer tw.Close()
|
||||||
|
|
||||||
pw := NewTarProgress(tw, a.Progress)
|
a.w = NewTarProgress(tw, a.Progress)
|
||||||
|
|
||||||
// Configure godirwalk.
|
fs := a.Filesystem.unixFS
|
||||||
options := &godirwalk.Options{
|
|
||||||
FollowSymbolicLinks: false,
|
|
||||||
Unsorted: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we're specifically looking for only certain files, or have requested
|
// If we're specifically looking for only certain files, or have requested
|
||||||
// that certain files be ignored we'll update the callback function to reflect
|
// that certain files be ignored we'll update the callback function to reflect
|
||||||
// that request.
|
// that request.
|
||||||
var callback godirwalk.WalkFunc
|
var callback walkFunc
|
||||||
if len(a.Files) == 0 && len(a.Ignore) > 0 {
|
if len(a.Files) == 0 && len(a.Ignore) > 0 {
|
||||||
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
|
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
|
||||||
|
callback = a.callback(func(_ int, _, relative string, _ ufs.DirEntry) error {
|
||||||
callback = a.callback(pw, func(_ string, rp string) error {
|
if i.MatchesPath(relative) {
|
||||||
if i.MatchesPath(rp) {
|
return SkipThis
|
||||||
return godirwalk.SkipThis
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
} else if len(a.Files) > 0 {
|
} else if len(a.Files) > 0 {
|
||||||
callback = a.withFilesCallback(pw)
|
callback = a.withFilesCallback()
|
||||||
} else {
|
} else {
|
||||||
callback = a.callback(pw)
|
callback = a.callback()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the callback function, wrapped with support for context cancellation.
|
// Open the base directory we were provided.
|
||||||
options.Callback = func(path string, de *godirwalk.Dirent) error {
|
dirfd, name, closeFd, err := fs.SafePath(a.BaseDirectory)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively walk the base directory.
|
||||||
|
return fs.WalkDirat(dirfd, name, func(dirfd int, name, relative string, d ufs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
default:
|
default:
|
||||||
return callback(path, de)
|
return callback(dirfd, name, relative, d)
|
||||||
}
|
}
|
||||||
}
|
})
|
||||||
|
|
||||||
// Recursively walk the path we are archiving.
|
|
||||||
return godirwalk.Walk(a.BasePath, options)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Callback function used to determine if a given file should be included in the archive
|
// Callback function used to determine if a given file should be included in the archive
|
||||||
// being generated.
|
// being generated.
|
||||||
func (a *Archive) callback(tw *TarProgress, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
|
func (a *Archive) callback(opts ...walkFunc) walkFunc {
|
||||||
return func(path string, de *godirwalk.Dirent) error {
|
// Get the base directory we need to strip when walking.
|
||||||
|
//
|
||||||
|
// This is important as when we are walking, the last part of the base directory
|
||||||
|
// is present on all the paths we walk.
|
||||||
|
var base string
|
||||||
|
if a.BaseDirectory != "" {
|
||||||
|
base = filepath.Base(a.BaseDirectory) + "/"
|
||||||
|
}
|
||||||
|
return func(dirfd int, name, relative string, d ufs.DirEntry) error {
|
||||||
// Skip directories because we are walking them recursively.
|
// Skip directories because we are walking them recursively.
|
||||||
if de.IsDir() {
|
if d.IsDir() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
relative := filepath.ToSlash(strings.TrimPrefix(path, a.BasePath+string(filepath.Separator)))
|
// If base isn't empty, strip it from the relative path. This fixes an
|
||||||
|
// issue when creating an archive starting from a nested directory.
|
||||||
|
//
|
||||||
|
// See https://github.com/pterodactyl/panel/issues/5030 for more details.
|
||||||
|
if base != "" {
|
||||||
|
relative = strings.TrimPrefix(relative, base)
|
||||||
|
}
|
||||||
|
|
||||||
// Call the additional options passed to this callback function. If any of them return
|
// Call the additional options passed to this callback function. If any of them return
|
||||||
// a non-nil error we will exit immediately.
|
// a non-nil error we will exit immediately.
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
if err := opt(path, relative); err != nil {
|
if err := opt(dirfd, name, relative, d); err != nil {
|
||||||
|
if err == SkipThis {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add the file to the archive, if it is nested in a directory,
|
// Add the file to the archive, if it is nested in a directory,
|
||||||
// the directory will be automatically "created" in the archive.
|
// the directory will be automatically "created" in the archive.
|
||||||
return a.addToArchive(path, relative, tw)
|
return a.addToArchive(dirfd, name, relative, d)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var SkipThis = errors.New("skip this")
|
||||||
|
|
||||||
// Pushes only files defined in the Files key to the final archive.
|
// Pushes only files defined in the Files key to the final archive.
|
||||||
func (a *Archive) withFilesCallback(tw *TarProgress) func(path string, de *godirwalk.Dirent) error {
|
func (a *Archive) withFilesCallback() walkFunc {
|
||||||
return a.callback(tw, func(p string, rp string) error {
|
return a.callback(func(_ int, _, relative string, _ ufs.DirEntry) error {
|
||||||
for _, f := range a.Files {
|
for _, f := range a.Files {
|
||||||
// Allow exact file matches, otherwise check if file is within a parent directory.
|
// Allow exact file matches, otherwise check if file is within a parent directory.
|
||||||
//
|
//
|
||||||
// The slashes are added in the prefix checks to prevent partial name matches from being
|
// The slashes are added in the prefix checks to prevent partial name matches from being
|
||||||
// included in the archive.
|
// included in the archive.
|
||||||
if f != p && !strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(f, "/")+"/") {
|
if f != relative && !strings.HasPrefix(strings.TrimSuffix(relative, "/")+"/", strings.TrimSuffix(f, "/")+"/") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Once we have a match return a nil value here so that the loop stops and the
|
// Once we have a match return a nil value here so that the loop stops and the
|
||||||
// call to this function will correctly include the file in the archive. If there
|
// call to this function will correctly include the file in the archive. If there
|
||||||
// are no matches we'll never make it to this line, and the final error returned
|
// are no matches we'll never make it to this line, and the final error returned
|
||||||
// will be the godirwalk.SkipThis error.
|
// will be the ufs.SkipDir error.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return godirwalk.SkipThis
|
return SkipThis
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adds a given file path to the final archive being created.
|
// Adds a given file path to the final archive being created.
|
||||||
func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
func (a *Archive) addToArchive(dirfd int, name, relative string, entry ufs.DirEntry) error {
|
||||||
// Lstat the file, this will give us the same information as Stat except that it will not
|
s, err := entry.Info()
|
||||||
// follow a symlink to its target automatically. This is important to avoid including
|
|
||||||
// files that exist outside the server root unintentionally in the backup.
|
|
||||||
s, err := os.Lstat(p)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if os.IsNotExist(err) {
|
if errors.Is(err, ufs.ErrNotExist) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return errors.WrapIff(err, "failed executing os.Lstat on '%s'", rp)
|
return errors.WrapIff(err, "failed executing os.Lstat on '%s'", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Skip socket files as they are unsupported by archive/tar.
|
// Skip socket files as they are unsupported by archive/tar.
|
||||||
@ -250,7 +286,7 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
// Ignore the not exist errors specifically, since there is nothing important about that.
|
// Ignore the not exist errors specifically, since there is nothing important about that.
|
||||||
if !os.IsNotExist(err) {
|
if !os.IsNotExist(err) {
|
||||||
log.WithField("path", rp).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
|
log.WithField("name", name).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -259,17 +295,17 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
|||||||
// Get the tar FileInfoHeader in order to add the file to the archive.
|
// Get the tar FileInfoHeader in order to add the file to the archive.
|
||||||
header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
|
header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", rp)
|
return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fix the header name if the file is not a symlink.
|
// Fix the header name if the file is not a symlink.
|
||||||
if s.Mode()&fs.ModeSymlink == 0 {
|
if s.Mode()&fs.ModeSymlink == 0 {
|
||||||
header.Name = rp
|
header.Name = relative
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write the tar FileInfoHeader to the archive.
|
// Write the tar FileInfoHeader to the archive.
|
||||||
if err := w.WriteHeader(header); err != nil {
|
if err := a.w.WriteHeader(header); err != nil {
|
||||||
return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", rp)
|
return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
|
// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
|
||||||
@ -291,7 +327,7 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open the file.
|
// Open the file.
|
||||||
f, err := os.Open(p)
|
f, err := a.Filesystem.unixFS.OpenFileat(dirfd, name, ufs.O_RDONLY, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return nil
|
return nil
|
||||||
@ -301,9 +337,8 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
|||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
// Copy the file's contents to the archive using our buffer.
|
// Copy the file's contents to the archive using our buffer.
|
||||||
if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
|
if _, err := io.CopyBuffer(a.w, io.LimitReader(f, header.Size), buf); err != nil {
|
||||||
return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
|
return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -10,7 +10,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
"github.com/mholt/archiver/v4"
|
"github.com/mholt/archives"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestArchive_Stream(t *testing.T) {
|
func TestArchive_Stream(t *testing.T) {
|
||||||
@ -20,43 +20,34 @@ func TestArchive_Stream(t *testing.T) {
|
|||||||
g.Describe("Archive", func() {
|
g.Describe("Archive", func() {
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
// Reset the filesystem after each run.
|
// Reset the filesystem after each run.
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
})
|
|
||||||
|
|
||||||
g.It("throws an error when passed invalid file paths", func() {
|
|
||||||
a := &Archive{
|
|
||||||
BasePath: fs.Path(),
|
|
||||||
Files: []string{
|
|
||||||
// To use the archiver properly, this needs to be filepath.Join(BasePath, "yeet")
|
|
||||||
// However, this test tests that we actually validate that behavior.
|
|
||||||
"yeet",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
g.Assert(a.Create(context.Background(), "")).IsNotNil()
|
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("creates archive with intended files", func() {
|
g.It("creates archive with intended files", func() {
|
||||||
g.Assert(fs.CreateDirectory("test", "/")).IsNil()
|
g.Assert(fs.CreateDirectory("test", "/")).IsNil()
|
||||||
g.Assert(fs.CreateDirectory("test2", "/")).IsNil()
|
g.Assert(fs.CreateDirectory("test2", "/")).IsNil()
|
||||||
|
|
||||||
err := fs.Writefile("test/file.txt", strings.NewReader("hello, world!\n"))
|
r := strings.NewReader("hello, world!\n")
|
||||||
|
err := fs.Write("test/file.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Writefile("test2/file.txt", strings.NewReader("hello, world!\n"))
|
r = strings.NewReader("hello, world!\n")
|
||||||
|
err = fs.Write("test2/file.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Writefile("test_file.txt", strings.NewReader("hello, world!\n"))
|
r = strings.NewReader("hello, world!\n")
|
||||||
|
err = fs.Write("test_file.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Writefile("test_file.txt.old", strings.NewReader("hello, world!\n"))
|
r = strings.NewReader("hello, world!\n")
|
||||||
|
err = fs.Write("test_file.txt.old", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
a := &Archive{
|
a := &Archive{
|
||||||
BasePath: fs.Path(),
|
Filesystem: fs,
|
||||||
Files: []string{
|
Files: []string{
|
||||||
filepath.Join(fs.Path(), "test"),
|
"test",
|
||||||
filepath.Join(fs.Path(), "test_file.txt"),
|
"test_file.txt",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -69,11 +60,11 @@ func TestArchive_Stream(t *testing.T) {
|
|||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
// Open the archive.
|
// Open the archive.
|
||||||
genericFs, err := archiver.FileSystem(context.Background(), archivePath)
|
genericFs, err := archives.FileSystem(context.Background(), archivePath, nil)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
// Assert that we are opening an archive.
|
// Assert that we are opening an archive.
|
||||||
afs, ok := genericFs.(archiver.ArchiveFS)
|
afs, ok := genericFs.(iofs.ReadDirFS)
|
||||||
g.Assert(ok).IsTrue()
|
g.Assert(ok).IsTrue()
|
||||||
|
|
||||||
// Get the names of the files recursively from the archive.
|
// Get the names of the files recursively from the archive.
|
||||||
@ -120,9 +111,7 @@ func getFiles(f iofs.ReadDirFS, name string) ([]string, error) {
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range files {
|
v = append(v, files...)
|
||||||
v = append(v, f)
|
|
||||||
}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
100
server/filesystem/archiverext/compressed.go
Normal file
100
server/filesystem/archiverext/compressed.go
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2016 Matthew Holt
|
||||||
|
|
||||||
|
// Code in this file was derived from
|
||||||
|
// https://github.com/mholt/archiver/blob/v4.0.0-alpha.8/fs.go
|
||||||
|
//
|
||||||
|
// These modifications were necessary to allow us to use an already open file
|
||||||
|
// with archiver.FileFS.
|
||||||
|
|
||||||
|
package archiverext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
|
||||||
|
"github.com/mholt/archives"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FileFS allows accessing a file on disk using a consistent file system interface.
|
||||||
|
// The value should be the path to a regular file, not a directory. This file will
|
||||||
|
// be the only entry in the file system and will be at its root. It can be accessed
|
||||||
|
// within the file system by the name of "." or the filename.
|
||||||
|
//
|
||||||
|
// If the file is compressed, set the Compression field so that reads from the
|
||||||
|
// file will be transparently decompressed.
|
||||||
|
type FileFS struct {
|
||||||
|
// File is the compressed file backing the FileFS.
|
||||||
|
File fs.File
|
||||||
|
|
||||||
|
// If file is compressed, setting this field will
|
||||||
|
// transparently decompress reads.
|
||||||
|
Compression archives.Decompressor
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens the named file, which must be the file used to create the file system.
|
||||||
|
func (f FileFS) Open(name string) (fs.File, error) {
|
||||||
|
if err := f.checkName(name, "open"); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if f.Compression == nil {
|
||||||
|
return f.File, nil
|
||||||
|
}
|
||||||
|
r, err := f.Compression.OpenReader(f.File)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return compressedFile{f.File, r}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDir returns a directory listing with the file as the singular entry.
|
||||||
|
func (f FileFS) ReadDir(name string) ([]fs.DirEntry, error) {
|
||||||
|
if err := f.checkName(name, "stat"); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
info, err := f.Stat(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return []fs.DirEntry{fs.FileInfoToDirEntry(info)}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat stats the named file, which must be the file used to create the file system.
|
||||||
|
func (f FileFS) Stat(name string) (fs.FileInfo, error) {
|
||||||
|
if err := f.checkName(name, "stat"); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.File.Stat()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f FileFS) checkName(name, op string) error {
|
||||||
|
if !fs.ValidPath(name) {
|
||||||
|
return &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
|
||||||
|
}
|
||||||
|
// TODO: we may need better name validation.
|
||||||
|
if name != "." {
|
||||||
|
return &fs.PathError{Op: op, Path: name, Err: fs.ErrNotExist}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// compressedFile is an fs.File that specially reads
|
||||||
|
// from a decompression reader, and which closes both
|
||||||
|
// that reader and the underlying file.
|
||||||
|
type compressedFile struct {
|
||||||
|
fs.File
|
||||||
|
decomp io.ReadCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cf compressedFile) Read(p []byte) (int, error) {
|
||||||
|
return cf.decomp.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cf compressedFile) Close() error {
|
||||||
|
err := cf.File.Close()
|
||||||
|
err2 := cf.decomp.Close()
|
||||||
|
if err2 != nil && err == nil {
|
||||||
|
err = err2
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
@ -1,25 +1,22 @@
|
|||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"archive/tar"
|
|
||||||
"archive/zip"
|
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
iofs "io/fs"
|
iofs "io/fs"
|
||||||
"os"
|
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
gzip2 "github.com/klauspost/compress/gzip"
|
"github.com/klauspost/compress/zip"
|
||||||
zip2 "github.com/klauspost/compress/zip"
|
"github.com/mholt/archives"
|
||||||
"github.com/mholt/archiver/v4"
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
|
"github.com/pterodactyl/wings/server/filesystem/archiverext"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CompressFiles compresses all the files matching the given paths in the
|
// CompressFiles compresses all the files matching the given paths in the
|
||||||
@ -31,46 +28,70 @@ import (
|
|||||||
// All paths are relative to the dir that is passed in as the first argument,
|
// All paths are relative to the dir that is passed in as the first argument,
|
||||||
// and the compressed file will be placed at that location named
|
// and the compressed file will be placed at that location named
|
||||||
// `archive-{date}.tar.gz`.
|
// `archive-{date}.tar.gz`.
|
||||||
func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
|
func (fs *Filesystem) CompressFiles(dir string, paths []string) (ufs.FileInfo, error) {
|
||||||
cleanedRootDir, err := fs.SafePath(dir)
|
a := &Archive{Filesystem: fs, BaseDirectory: dir, Files: paths}
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Take all the paths passed in and merge them together with the root directory we've gotten.
|
|
||||||
for i, p := range paths {
|
|
||||||
paths[i] = filepath.Join(cleanedRootDir, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
cleaned, err := fs.ParallelSafePath(paths)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
|
|
||||||
d := path.Join(
|
d := path.Join(
|
||||||
cleanedRootDir,
|
dir,
|
||||||
fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
|
fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
|
||||||
)
|
)
|
||||||
|
f, err := fs.unixFS.OpenFile(d, ufs.O_WRONLY|ufs.O_CREATE, 0o644)
|
||||||
if err := a.Create(context.Background(), d); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Stat(d)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = os.Remove(d)
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
cw := ufs.NewCountedWriter(f)
|
||||||
|
if err := a.Stream(context.Background(), cw); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !fs.unixFS.CanFit(cw.BytesWritten()) {
|
||||||
|
_ = fs.unixFS.Remove(d)
|
||||||
|
return nil, newFilesystemError(ErrCodeDiskSpace, nil)
|
||||||
|
}
|
||||||
|
fs.unixFS.Add(cw.BytesWritten())
|
||||||
|
return f.Stat()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) archiverFileSystem(ctx context.Context, p string) (iofs.FS, error) {
|
||||||
|
f, err := fs.unixFS.Open(p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Do not use defer to close `f`, it will likely be used later.
|
||||||
|
|
||||||
|
format, _, err := archives.Identify(ctx, filepath.Base(p), f)
|
||||||
|
if err != nil && !errors.Is(err, archives.NoMatch) {
|
||||||
|
_ = f.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := fs.HasSpaceFor(f.Size()); err != nil {
|
// Reset the file reader.
|
||||||
_ = os.Remove(d)
|
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
||||||
|
_ = f.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
fs.addDisk(f.Size())
|
info, err := f.Stat()
|
||||||
|
if err != nil {
|
||||||
|
_ = f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
return f, nil
|
if format != nil {
|
||||||
|
switch ff := format.(type) {
|
||||||
|
case archives.Zip:
|
||||||
|
// zip.Reader is more performant than ArchiveFS, because zip.Reader caches content information
|
||||||
|
// and zip.Reader can open several content files concurrently because of io.ReaderAt requirement
|
||||||
|
// while ArchiveFS can't.
|
||||||
|
// zip.Reader doesn't suffer from issue #330 and #310 according to local test (but they should be fixed anyway)
|
||||||
|
return zip.NewReader(f, info.Size())
|
||||||
|
case archives.Extraction:
|
||||||
|
return &archives.ArchiveFS{Stream: io.NewSectionReader(f, 0, info.Size()), Format: ff, Context: ctx}, nil
|
||||||
|
case archives.Compression:
|
||||||
|
return archiverext.FileFS{File: f, Compression: ff}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
return nil, archives.NoMatch
|
||||||
}
|
}
|
||||||
|
|
||||||
// SpaceAvailableForDecompression looks through a given archive and determines
|
// SpaceAvailableForDecompression looks through a given archive and determines
|
||||||
@ -82,24 +103,15 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
source, err := fs.SafePath(filepath.Join(dir, file))
|
fsys, err := fs.archiverFileSystem(ctx, filepath.Join(dir, file))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
if errors.Is(err, archives.NoMatch) {
|
||||||
}
|
|
||||||
|
|
||||||
// Get the cached size in a parallel process so that if it is not cached we are not
|
|
||||||
// waiting an unnecessary amount of time on this call.
|
|
||||||
dirSize, err := fs.DiskUsage(false)
|
|
||||||
|
|
||||||
fsys, err := archiver.FileSystem(ctx, source)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(err, archiver.ErrNoMatch) {
|
|
||||||
return newFilesystemError(ErrCodeUnknownArchive, err)
|
return newFilesystemError(ErrCodeUnknownArchive, err)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var size int64
|
var size atomic.Int64
|
||||||
return iofs.WalkDir(fsys, ".", func(path string, d iofs.DirEntry, err error) error {
|
return iofs.WalkDir(fsys, ".", func(path string, d iofs.DirEntry, err error) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -114,7 +126,7 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if atomic.AddInt64(&size, info.Size())+dirSize > fs.MaxDisk() {
|
if !fs.unixFS.CanFit(size.Add(info.Size())) {
|
||||||
return newFilesystemError(ErrCodeDiskSpace, nil)
|
return newFilesystemError(ErrCodeDiskSpace, nil)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@ -128,38 +140,23 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
|
|||||||
// zip-slip attack being attempted by validating that the final path is within
|
// zip-slip attack being attempted by validating that the final path is within
|
||||||
// the server data directory.
|
// the server data directory.
|
||||||
func (fs *Filesystem) DecompressFile(ctx context.Context, dir string, file string) error {
|
func (fs *Filesystem) DecompressFile(ctx context.Context, dir string, file string) error {
|
||||||
source, err := fs.SafePath(filepath.Join(dir, file))
|
f, err := fs.unixFS.Open(filepath.Join(dir, file))
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return fs.DecompressFileUnsafe(ctx, dir, source)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecompressFileUnsafe will decompress any file on the local disk without checking
|
|
||||||
// if it is owned by the server. The file will be SAFELY decompressed and extracted
|
|
||||||
// into the server's directory.
|
|
||||||
func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file string) error {
|
|
||||||
// Ensure that the archive actually exists on the system.
|
|
||||||
if _, err := os.Stat(file); err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Open(file)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
// Identify the type of archive we are dealing with.
|
// Identify the type of archive we are dealing with.
|
||||||
format, input, err := archiver.Identify(filepath.Base(file), f)
|
format, input, err := archives.Identify(ctx, filepath.Base(file), f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, archiver.ErrNoMatch) {
|
if errors.Is(err, archives.NoMatch) {
|
||||||
return newFilesystemError(ErrCodeUnknownArchive, err)
|
return newFilesystemError(ErrCodeUnknownArchive, err)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs.extractStream(ctx, extractStreamOptions{
|
return fs.extractStream(ctx, extractStreamOptions{
|
||||||
|
FileName: file,
|
||||||
Directory: dir,
|
Directory: dir,
|
||||||
Format: format,
|
Format: format,
|
||||||
Reader: input,
|
Reader: input,
|
||||||
@ -168,14 +165,13 @@ func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file
|
|||||||
|
|
||||||
// ExtractStreamUnsafe .
|
// ExtractStreamUnsafe .
|
||||||
func (fs *Filesystem) ExtractStreamUnsafe(ctx context.Context, dir string, r io.Reader) error {
|
func (fs *Filesystem) ExtractStreamUnsafe(ctx context.Context, dir string, r io.Reader) error {
|
||||||
format, input, err := archiver.Identify("archive.tar.gz", r)
|
format, input, err := archives.Identify(ctx, "archive.tar.gz", r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, archiver.ErrNoMatch) {
|
if errors.Is(err, archives.NoMatch) {
|
||||||
return newFilesystemError(ErrCodeUnknownArchive, err)
|
return newFilesystemError(ErrCodeUnknownArchive, err)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs.extractStream(ctx, extractStreamOptions{
|
return fs.extractStream(ctx, extractStreamOptions{
|
||||||
Directory: dir,
|
Directory: dir,
|
||||||
Format: format,
|
Format: format,
|
||||||
@ -189,86 +185,99 @@ type extractStreamOptions struct {
|
|||||||
// File name of the archive.
|
// File name of the archive.
|
||||||
FileName string
|
FileName string
|
||||||
// Format of the archive.
|
// Format of the archive.
|
||||||
Format archiver.Format
|
Format archives.Format
|
||||||
// Reader for the archive.
|
// Reader for the archive.
|
||||||
Reader io.Reader
|
Reader io.Reader
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fs *Filesystem) extractStream(ctx context.Context, opts extractStreamOptions) error {
|
func (fs *Filesystem) extractStream(ctx context.Context, opts extractStreamOptions) error {
|
||||||
// Decompress and extract archive
|
// See if it's a compressed archive, such as TAR or a ZIP
|
||||||
if ex, ok := opts.Format.(archiver.Extractor); ok {
|
ex, ok := opts.Format.(archives.Extractor)
|
||||||
return ex.Extract(ctx, opts.Reader, nil, func(ctx context.Context, f archiver.File) error {
|
if !ok {
|
||||||
if f.IsDir() {
|
// If not, check if it's a single-file compression, such as
|
||||||
return nil
|
// .log.gz, .sql.gz, and so on
|
||||||
|
de, ok := opts.Format.(archives.Decompressor)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strip the compression suffix
|
||||||
|
p := filepath.Join(opts.Directory, strings.TrimSuffix(opts.FileName, opts.Format.Extension()))
|
||||||
|
|
||||||
|
// Make sure it's not ignored
|
||||||
|
if err := fs.IsIgnored(p); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
reader, err := de.OpenReader(opts.Reader)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer reader.Close()
|
||||||
|
|
||||||
|
// Open the file for creation/writing
|
||||||
|
f, err := fs.unixFS.OpenFile(p, ufs.O_WRONLY|ufs.O_CREATE, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
// Read in 4 KB chunks
|
||||||
|
buf := make([]byte, 4096)
|
||||||
|
for {
|
||||||
|
n, err := reader.Read(buf)
|
||||||
|
if n > 0 {
|
||||||
|
|
||||||
|
// Check quota before writing the chunk
|
||||||
|
if quotaErr := fs.HasSpaceFor(int64(n)); quotaErr != nil {
|
||||||
|
return quotaErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the chunk
|
||||||
|
if _, writeErr := f.Write(buf[:n]); writeErr != nil {
|
||||||
|
return writeErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add to quota
|
||||||
|
fs.addDisk(int64(n))
|
||||||
}
|
}
|
||||||
p := filepath.Join(opts.Directory, ExtractNameFromArchive(f))
|
|
||||||
// If it is ignored, just don't do anything with the file and skip over it.
|
|
||||||
if err := fs.IsIgnored(p); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
r, err := f.Open()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// EOF are expected
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return any other
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer r.Close()
|
|
||||||
if err := fs.Writefile(p, r); err != nil {
|
|
||||||
return wrapError(err, opts.FileName)
|
|
||||||
}
|
|
||||||
// Update the file permissions to the one set in the archive.
|
|
||||||
if err := fs.Chmod(p, f.Mode()); err != nil {
|
|
||||||
return wrapError(err, opts.FileName)
|
|
||||||
}
|
|
||||||
// Update the file modification time to the one set in the archive.
|
|
||||||
if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
|
|
||||||
return wrapError(err, opts.FileName)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractNameFromArchive looks at an archive file to try and determine the name
|
|
||||||
// for a given element in an archive. Because of... who knows why, each file type
|
|
||||||
// uses different methods to determine the file name.
|
|
||||||
//
|
|
||||||
// If there is a archiver.File#Sys() value present we will try to use the name
|
|
||||||
// present in there, otherwise falling back to archiver.File#Name() if all else
|
|
||||||
// fails. Without this logic present, some archive types such as zip/tars/etc.
|
|
||||||
// will write all of the files to the base directory, rather than the nested
|
|
||||||
// directory that is expected.
|
|
||||||
//
|
|
||||||
// For files like ".rar" types, there is no f.Sys() value present, and the value
|
|
||||||
// of archiver.File#Name() will be what you need.
|
|
||||||
func ExtractNameFromArchive(f archiver.File) string {
|
|
||||||
sys := f.Sys()
|
|
||||||
// Some archive types won't have a value returned when you call f.Sys() on them,
|
|
||||||
// such as ".rar" archives for example. In those cases the only thing you can do
|
|
||||||
// is hope that "f.Name()" is actually correct for them.
|
|
||||||
if sys == nil {
|
|
||||||
return f.Name()
|
|
||||||
}
|
|
||||||
switch s := sys.(type) {
|
|
||||||
case *zip.FileHeader:
|
|
||||||
return s.Name
|
|
||||||
case *zip2.FileHeader:
|
|
||||||
return s.Name
|
|
||||||
case *tar.Header:
|
|
||||||
return s.Name
|
|
||||||
case *gzip.Header:
|
|
||||||
return s.Name
|
|
||||||
case *gzip2.Header:
|
|
||||||
return s.Name
|
|
||||||
default:
|
|
||||||
// At this point we cannot figure out what type of archive this might be so
|
|
||||||
// just try to find the name field in the struct. If it is found return it.
|
|
||||||
field := reflect.Indirect(reflect.ValueOf(sys)).FieldByName("Name")
|
|
||||||
if field.IsValid() {
|
|
||||||
return field.String()
|
|
||||||
}
|
}
|
||||||
// Fallback to the basename of the file at this point. There is nothing we can really
|
|
||||||
// do to try and figure out what the underlying directory of the file is supposed to
|
return nil
|
||||||
// be since it didn't implement a name field.
|
|
||||||
return f.Name()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Decompress and extract archive
|
||||||
|
return ex.Extract(ctx, opts.Reader, func(ctx context.Context, f archives.FileInfo) error {
|
||||||
|
if f.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p := filepath.Join(opts.Directory, f.NameInArchive)
|
||||||
|
// If it is ignored, just don't do anything with the file and skip over it.
|
||||||
|
if err := fs.IsIgnored(p); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
r, err := f.Open()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
if err := fs.Write(p, r, f.Size(), f.Mode()); err != nil {
|
||||||
|
return wrapError(err, opts.FileName)
|
||||||
|
}
|
||||||
|
// Update the file modification time to the one set in the archive.
|
||||||
|
if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
|
||||||
|
return wrapError(err, opts.FileName)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@ -3,17 +3,18 @@ package filesystem
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Given an archive named test.{ext}, with the following file structure:
|
// Given an archive named test.{ext}, with the following file structure:
|
||||||
|
//
|
||||||
// test/
|
// test/
|
||||||
// |──inside/
|
// |──inside/
|
||||||
// |────finside.txt
|
// |────finside.txt
|
||||||
// |──outside.txt
|
// |──outside.txt
|
||||||
|
//
|
||||||
// this test will ensure that it's being decompressed as expected
|
// this test will ensure that it's being decompressed as expected
|
||||||
func TestFilesystem_DecompressFile(t *testing.T) {
|
func TestFilesystem_DecompressFile(t *testing.T) {
|
||||||
g := Goblin(t)
|
g := Goblin(t)
|
||||||
@ -47,9 +48,7 @@ func TestFilesystem_DecompressFile(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
|
||||||
atomic.StoreInt64(&fs.diskLimit, 0)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,26 +1,29 @@
|
|||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
"slices"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/karrick/godirwalk"
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SpaceCheckingOpts struct {
|
type SpaceCheckingOpts struct {
|
||||||
AllowStaleResponse bool
|
AllowStaleResponse bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: can this be replaced with some sort of atomic? Like atomic.Pointer?
|
||||||
type usageLookupTime struct {
|
type usageLookupTime struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
value time.Time
|
value time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the last time that a disk space lookup was performed.
|
// Set sets the last time that a disk space lookup was performed.
|
||||||
func (ult *usageLookupTime) Set(t time.Time) {
|
func (ult *usageLookupTime) Set(t time.Time) {
|
||||||
ult.Lock()
|
ult.Lock()
|
||||||
ult.value = t
|
ult.value = t
|
||||||
@ -35,14 +38,15 @@ func (ult *usageLookupTime) Get() time.Time {
|
|||||||
return ult.value
|
return ult.value
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
|
// MaxDisk returns the maximum amount of disk space that this Filesystem
|
||||||
|
// instance is allowed to use.
|
||||||
func (fs *Filesystem) MaxDisk() int64 {
|
func (fs *Filesystem) MaxDisk() int64 {
|
||||||
return atomic.LoadInt64(&fs.diskLimit)
|
return fs.unixFS.Limit()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets the disk space limit for this Filesystem instance.
|
// SetDiskLimit sets the disk space limit for this Filesystem instance.
|
||||||
func (fs *Filesystem) SetDiskLimit(i int64) {
|
func (fs *Filesystem) SetDiskLimit(i int64) {
|
||||||
atomic.SwapInt64(&fs.diskLimit, i)
|
fs.unixFS.SetLimit(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The same concept as HasSpaceAvailable however this will return an error if there is
|
// The same concept as HasSpaceAvailable however this will return an error if there is
|
||||||
@ -65,7 +69,7 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
|
|||||||
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
||||||
size, err := fs.DiskUsage(allowStaleValue)
|
size, err := fs.DiskUsage(allowStaleValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
|
log.WithField("root", fs.Path()).WithField("error", err).Warn("failed to determine root fs directory size")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If space is -1 or 0 just return true, means they're allowed unlimited.
|
// If space is -1 or 0 just return true, means they're allowed unlimited.
|
||||||
@ -84,7 +88,7 @@ func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
|||||||
// function for critical logical checks. It should only be used in areas where the actual disk usage
|
// function for critical logical checks. It should only be used in areas where the actual disk usage
|
||||||
// does not need to be perfect, e.g. API responses for server resource usage.
|
// does not need to be perfect, e.g. API responses for server resource usage.
|
||||||
func (fs *Filesystem) CachedUsage() int64 {
|
func (fs *Filesystem) CachedUsage() int64 {
|
||||||
return atomic.LoadInt64(&fs.diskUsed)
|
return fs.unixFS.Usage()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Internal helper function to allow other parts of the codebase to check the total used disk space
|
// Internal helper function to allow other parts of the codebase to check the total used disk space
|
||||||
@ -114,14 +118,14 @@ func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
|
|||||||
// currently performing a lookup, just do the disk usage calculation in the background.
|
// currently performing a lookup, just do the disk usage calculation in the background.
|
||||||
go func(fs *Filesystem) {
|
go func(fs *Filesystem) {
|
||||||
if _, err := fs.updateCachedDiskUsage(); err != nil {
|
if _, err := fs.updateCachedDiskUsage(); err != nil {
|
||||||
log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
|
log.WithField("root", fs.Path()).WithField("error", err).Warn("failed to update fs disk usage from within routine")
|
||||||
}
|
}
|
||||||
}(fs)
|
}(fs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return the currently cached value back to the calling function.
|
// Return the currently cached value back to the calling function.
|
||||||
return atomic.LoadInt64(&fs.diskUsed), nil
|
return fs.unixFS.Usage(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Updates the currently used disk space for a server.
|
// Updates the currently used disk space for a server.
|
||||||
@ -149,63 +153,56 @@ func (fs *Filesystem) updateCachedDiskUsage() (int64, error) {
|
|||||||
// error encountered.
|
// error encountered.
|
||||||
fs.lastLookupTime.Set(time.Now())
|
fs.lastLookupTime.Set(time.Now())
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, size)
|
fs.unixFS.SetUsage(size)
|
||||||
|
|
||||||
return size, err
|
return size, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determines the directory size of a given location by running parallel tasks to iterate
|
// DirectorySize calculates the size of a directory and its descendants.
|
||||||
// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
|
func (fs *Filesystem) DirectorySize(root string) (int64, error) {
|
||||||
// on locations with tons of files, so it is recommended that you cache the output.
|
dirfd, name, closeFd, err := fs.unixFS.SafePath(root)
|
||||||
func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
|
defer closeFd()
|
||||||
d, err := fs.SafePath(dir)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var size int64
|
var hardLinks []uint64
|
||||||
var st syscall.Stat_t
|
|
||||||
|
|
||||||
err = godirwalk.Walk(d, &godirwalk.Options{
|
var size atomic.Int64
|
||||||
Unsorted: true,
|
err = fs.unixFS.WalkDirat(dirfd, name, func(dirfd int, name, _ string, d ufs.DirEntry, err error) error {
|
||||||
Callback: func(p string, e *godirwalk.Dirent) error {
|
if err != nil {
|
||||||
// If this is a symlink then resolve the final destination of it before trying to continue walking
|
return errors.Wrap(err, "walkdirat err")
|
||||||
// over its contents. If it resolves outside the server data directory just skip everything else for
|
}
|
||||||
// it. Otherwise, allow it to continue.
|
|
||||||
if e.IsSymlink() {
|
|
||||||
if _, err := fs.SafePath(p); err != nil {
|
|
||||||
if IsErrorCode(err, ErrCodePathResolution) {
|
|
||||||
return godirwalk.SkipThis
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !e.IsDir() {
|
|
||||||
_ = syscall.Lstat(p, &st)
|
|
||||||
atomic.AddInt64(&size, st.Size)
|
|
||||||
}
|
|
||||||
|
|
||||||
|
// Only calculate the size of regular files.
|
||||||
|
if !d.Type().IsRegular() {
|
||||||
return nil
|
return nil
|
||||||
},
|
}
|
||||||
})
|
|
||||||
|
|
||||||
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
|
info, err := fs.unixFS.Lstatat(dirfd, name)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "lstatat err")
|
||||||
|
}
|
||||||
|
|
||||||
|
var sysFileInfo = info.Sys().(*unix.Stat_t)
|
||||||
|
if sysFileInfo.Nlink > 1 {
|
||||||
|
// Hard links have the same inode number
|
||||||
|
if slices.Contains(hardLinks, sysFileInfo.Ino) {
|
||||||
|
// Don't add hard links size twice
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
hardLinks = append(hardLinks, sysFileInfo.Ino)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
size.Add(info.Size())
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return size.Load(), errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function to determine if a server has space available for a file of a given size.
|
|
||||||
// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
|
|
||||||
// will be raised.
|
|
||||||
func (fs *Filesystem) HasSpaceFor(size int64) error {
|
func (fs *Filesystem) HasSpaceFor(size int64) error {
|
||||||
if fs.MaxDisk() == 0 {
|
if !fs.unixFS.CanFit(size) {
|
||||||
return nil
|
|
||||||
}
|
|
||||||
s, err := fs.DiskUsage(true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if (s + size) > fs.MaxDisk() {
|
|
||||||
return newFilesystemError(ErrCodeDiskSpace, nil)
|
return newFilesystemError(ErrCodeDiskSpace, nil)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@ -213,24 +210,5 @@ func (fs *Filesystem) HasSpaceFor(size int64) error {
|
|||||||
|
|
||||||
// Updates the disk usage for the Filesystem instance.
|
// Updates the disk usage for the Filesystem instance.
|
||||||
func (fs *Filesystem) addDisk(i int64) int64 {
|
func (fs *Filesystem) addDisk(i int64) int64 {
|
||||||
size := atomic.LoadInt64(&fs.diskUsed)
|
return fs.unixFS.Add(i)
|
||||||
|
|
||||||
// Sorry go gods. This is ugly but the best approach I can come up with for right
|
|
||||||
// now without completely re-evaluating the logic we use for determining disk space.
|
|
||||||
//
|
|
||||||
// Normally I would just be using the atomic load right below, but I'm not sure about
|
|
||||||
// the scenarios where it is 0 because nothing has run that would trigger a disk size
|
|
||||||
// calculation?
|
|
||||||
//
|
|
||||||
// Perhaps that isn't even a concern for the sake of this?
|
|
||||||
if !fs.isTest {
|
|
||||||
size, _ = fs.DiskUsage(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we're dropping below 0 somehow just cap it to 0.
|
|
||||||
if (size + i) < 0 {
|
|
||||||
return atomic.SwapInt64(&fs.diskUsed, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return atomic.AddInt64(&fs.diskUsed, i)
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -2,11 +2,12 @@ package filesystem
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ErrorCode string
|
type ErrorCode string
|
||||||
@ -86,15 +87,15 @@ func (e *Error) Unwrap() error {
|
|||||||
|
|
||||||
// Generates an error logger instance with some basic information.
|
// Generates an error logger instance with some basic information.
|
||||||
func (fs *Filesystem) error(err error) *log.Entry {
|
func (fs *Filesystem) error(err error) *log.Entry {
|
||||||
return log.WithField("subsystem", "filesystem").WithField("root", fs.root).WithField("error", err)
|
return log.WithField("subsystem", "filesystem").WithField("root", fs.Path()).WithField("error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle errors encountered when walking through directories.
|
// Handle errors encountered when walking through directories.
|
||||||
//
|
//
|
||||||
// If there is a path resolution error just skip the item entirely. Only return this for a
|
// If there is a path resolution error just skip the item entirely. Only return this for a
|
||||||
// directory, otherwise return nil. Returning this error for a file will stop the walking
|
// directory, otherwise return nil. Returning this error for a file will stop the walking
|
||||||
// for the remainder of the directory. This is assuming an os.FileInfo struct was even returned.
|
// for the remainder of the directory. This is assuming an FileInfo struct was even returned.
|
||||||
func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
|
func (fs *Filesystem) handleWalkerError(err error, f ufs.FileInfo) error {
|
||||||
if !IsErrorCode(err, ErrCodePathResolution) {
|
if !IsErrorCode(err, ErrCodePathResolution) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,13 +1,11 @@
|
|||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@ -15,220 +13,208 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
|
"github.com/apex/log"
|
||||||
"github.com/gabriel-vasile/mimetype"
|
"github.com/gabriel-vasile/mimetype"
|
||||||
"github.com/karrick/godirwalk"
|
|
||||||
ignore "github.com/sabhiram/go-gitignore"
|
ignore "github.com/sabhiram/go-gitignore"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Filesystem struct {
|
type Filesystem struct {
|
||||||
|
unixFS *ufs.Quota
|
||||||
|
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
lastLookupTime *usageLookupTime
|
lastLookupTime *usageLookupTime
|
||||||
lookupInProgress *system.AtomicBool
|
lookupInProgress atomic.Bool
|
||||||
diskUsed int64
|
|
||||||
diskCheckInterval time.Duration
|
diskCheckInterval time.Duration
|
||||||
denylist *ignore.GitIgnore
|
denylist *ignore.GitIgnore
|
||||||
|
|
||||||
// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
|
|
||||||
diskLimit int64
|
|
||||||
|
|
||||||
// The root data directory path for this Filesystem instance.
|
|
||||||
root string
|
|
||||||
|
|
||||||
isTest bool
|
isTest bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new Filesystem instance for a given server.
|
// New creates a new Filesystem instance for a given server.
|
||||||
func New(root string, size int64, denylist []string) *Filesystem {
|
func New(root string, size int64, denylist []string) (*Filesystem, error) {
|
||||||
|
if err := os.MkdirAll(root, 0o755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
unixFS, err := ufs.NewUnixFS(root, config.UseOpenat2())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
quota := ufs.NewQuota(unixFS, size)
|
||||||
|
|
||||||
return &Filesystem{
|
return &Filesystem{
|
||||||
root: root,
|
unixFS: quota,
|
||||||
diskLimit: size,
|
|
||||||
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
|
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
|
||||||
lastLookupTime: &usageLookupTime{},
|
lastLookupTime: &usageLookupTime{},
|
||||||
lookupInProgress: system.NewAtomicBool(false),
|
|
||||||
denylist: ignore.CompileIgnoreLines(denylist...),
|
denylist: ignore.CompileIgnoreLines(denylist...),
|
||||||
}
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Path returns the root path for the Filesystem instance.
|
// Path returns the root path for the Filesystem instance.
|
||||||
func (fs *Filesystem) Path() string {
|
func (fs *Filesystem) Path() string {
|
||||||
return fs.root
|
return fs.unixFS.BasePath()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDir reads directory entries.
|
||||||
|
func (fs *Filesystem) ReadDir(path string) ([]ufs.DirEntry, error) {
|
||||||
|
return fs.unixFS.ReadDir(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDirStat is like ReadDir except that it returns FileInfo for each entry
|
||||||
|
// instead of just a DirEntry.
|
||||||
|
func (fs *Filesystem) ReadDirStat(path string) ([]ufs.FileInfo, error) {
|
||||||
|
return ufs.ReadDirMap(fs.unixFS.UnixFS, path, func(e ufs.DirEntry) (ufs.FileInfo, error) {
|
||||||
|
return e.Info()
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// File returns a reader for a file instance as well as the stat information.
|
// File returns a reader for a file instance as well as the stat information.
|
||||||
func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
|
func (fs *Filesystem) File(p string) (ufs.File, Stat, error) {
|
||||||
cleaned, err := fs.SafePath(p)
|
f, err := fs.unixFS.Open(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, Stat{}, errors.WithStackIf(err)
|
return nil, Stat{}, err
|
||||||
}
|
}
|
||||||
st, err := fs.Stat(cleaned)
|
st, err := statFromFile(f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
_ = f.Close()
|
||||||
return nil, Stat{}, newFilesystemError(ErrNotExist, err)
|
return nil, Stat{}, err
|
||||||
}
|
|
||||||
return nil, Stat{}, errors.WithStackIf(err)
|
|
||||||
}
|
|
||||||
if st.IsDir() {
|
|
||||||
return nil, Stat{}, newFilesystemError(ErrCodeIsDirectory, nil)
|
|
||||||
}
|
|
||||||
f, err := os.Open(cleaned)
|
|
||||||
if err != nil {
|
|
||||||
return nil, Stat{}, errors.WithStackIf(err)
|
|
||||||
}
|
}
|
||||||
return f, st, nil
|
return f, st, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) UnixFS() *ufs.UnixFS {
|
||||||
|
return fs.unixFS.UnixFS
|
||||||
|
}
|
||||||
|
|
||||||
// Touch acts by creating the given file and path on the disk if it is not present
|
// Touch acts by creating the given file and path on the disk if it is not present
|
||||||
// already. If it is present, the file is opened using the defaults which will truncate
|
// already. If it is present, the file is opened using the defaults which will truncate
|
||||||
// the contents. The opened file is then returned to the caller.
|
// the contents. The opened file is then returned to the caller.
|
||||||
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
|
func (fs *Filesystem) Touch(p string, flag int) (ufs.File, error) {
|
||||||
cleaned, err := fs.SafePath(p)
|
return fs.unixFS.Touch(p, flag, 0o644)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(cleaned, flag, 0o644)
|
|
||||||
if err == nil {
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
if f != nil {
|
|
||||||
_ = f.Close()
|
|
||||||
}
|
|
||||||
// If the error is not because it doesn't exist then we just need to bail at this point.
|
|
||||||
if !errors.Is(err, os.ErrNotExist) {
|
|
||||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file handle")
|
|
||||||
}
|
|
||||||
// Only create and chown the directory if it doesn't exist.
|
|
||||||
if _, err := os.Stat(filepath.Dir(cleaned)); errors.Is(err, os.ErrNotExist) {
|
|
||||||
// Create the path leading up to the file we're trying to create, setting the final perms
|
|
||||||
// on it as we go.
|
|
||||||
if err := os.MkdirAll(filepath.Dir(cleaned), 0o755); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to create directory tree")
|
|
||||||
}
|
|
||||||
if err := fs.Chown(filepath.Dir(cleaned)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
o := &fileOpener{}
|
|
||||||
// Try to open the file now that we have created the pathing necessary for it, and then
|
|
||||||
// Chown that file so that the permissions don't mess with things.
|
|
||||||
f, err = o.open(cleaned, flag, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file with wait")
|
|
||||||
}
|
|
||||||
_ = fs.Chown(cleaned)
|
|
||||||
return f, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Writefile writes a file to the system. If the file does not already exist one
|
// Writefile writes a file to the system. If the file does not already exist one
|
||||||
// will be created. This will also properly recalculate the disk space used by
|
// will be created. This will also properly recalculate the disk space used by
|
||||||
// the server when writing new files or modifying existing ones.
|
// the server when writing new files or modifying existing ones.
|
||||||
|
//
|
||||||
|
// DEPRECATED: use `Write` instead.
|
||||||
func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
||||||
cleaned, err := fs.SafePath(p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var currentSize int64
|
var currentSize int64
|
||||||
// If the file does not exist on the system already go ahead and create the pathway
|
st, err := fs.unixFS.Stat(p)
|
||||||
// to it and an empty file. We'll then write to it later on after this completes.
|
if err != nil && !errors.Is(err, ufs.ErrNotExist) {
|
||||||
stat, err := os.Stat(cleaned)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
|
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
|
||||||
} else if err == nil {
|
} else if err == nil {
|
||||||
if stat.IsDir() {
|
if st.IsDir() {
|
||||||
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: cleaned})
|
// TODO: resolved
|
||||||
|
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: ""})
|
||||||
}
|
}
|
||||||
currentSize = stat.Size()
|
currentSize = st.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Touch the file and return the handle to it at this point. This will
|
||||||
|
// create or truncate the file, and create any necessary parent directories
|
||||||
|
// if they are missing.
|
||||||
|
file, err := fs.unixFS.Touch(p, ufs.O_RDWR|ufs.O_TRUNC, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error touching file: %w", err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
// Do not use CopyBuffer here, it is wasteful as the file implements
|
||||||
|
// io.ReaderFrom, which causes it to not use the buffer anyways.
|
||||||
|
n, err := io.Copy(file, r)
|
||||||
|
|
||||||
|
// Adjust the disk usage to account for the old size and the new size of the file.
|
||||||
|
fs.unixFS.Add(n - currentSize)
|
||||||
|
|
||||||
|
if err := fs.chownFile(p); err != nil {
|
||||||
|
return fmt.Errorf("error chowning file: %w", err)
|
||||||
|
}
|
||||||
|
// Return the error from io.Copy.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) Write(p string, r io.Reader, newSize int64, mode ufs.FileMode) error {
|
||||||
|
var currentSize int64
|
||||||
|
st, err := fs.unixFS.Stat(p)
|
||||||
|
if err != nil && !errors.Is(err, ufs.ErrNotExist) {
|
||||||
|
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
|
||||||
|
} else if err == nil {
|
||||||
|
if st.IsDir() {
|
||||||
|
// TODO: resolved
|
||||||
|
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: ""})
|
||||||
|
}
|
||||||
|
currentSize = st.Size()
|
||||||
}
|
}
|
||||||
|
|
||||||
br := bufio.NewReader(r)
|
|
||||||
// Check that the new size we're writing to the disk can fit. If there is currently
|
// Check that the new size we're writing to the disk can fit. If there is currently
|
||||||
// a file we'll subtract that current file size from the size of the buffer to determine
|
// a file we'll subtract that current file size from the size of the buffer to determine
|
||||||
// the amount of new data we're writing (or amount we're removing if smaller).
|
// the amount of new data we're writing (or amount we're removing if smaller).
|
||||||
if err := fs.HasSpaceFor(int64(br.Size()) - currentSize); err != nil {
|
if err := fs.HasSpaceFor(newSize - currentSize); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Touch the file and return the handle to it at this point. This will create the file,
|
// Touch the file and return the handle to it at this point. This will
|
||||||
// any necessary directories, and set the proper owner of the file.
|
// create or truncate the file, and create any necessary parent directories
|
||||||
file, err := fs.Touch(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
|
// if they are missing.
|
||||||
|
file, err := fs.unixFS.Touch(p, ufs.O_RDWR|ufs.O_TRUNC, mode)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
buf := make([]byte, 1024*4)
|
if newSize == 0 {
|
||||||
sz, err := io.CopyBuffer(file, r, buf)
|
// Subtract the previous size of the file if the new size is 0.
|
||||||
|
fs.unixFS.Add(-currentSize)
|
||||||
|
} else {
|
||||||
|
// Do not use CopyBuffer here, it is wasteful as the file implements
|
||||||
|
// io.ReaderFrom, which causes it to not use the buffer anyways.
|
||||||
|
var n int64
|
||||||
|
n, err = io.Copy(file, io.LimitReader(r, newSize))
|
||||||
|
|
||||||
// Adjust the disk usage to account for the old size and the new size of the file.
|
// Adjust the disk usage to account for the old size and the new size of the file.
|
||||||
fs.addDisk(sz - currentSize)
|
fs.unixFS.Add(n - currentSize)
|
||||||
|
}
|
||||||
|
|
||||||
return fs.unsafeChown(cleaned)
|
if err := fs.chownFile(p); err != nil {
|
||||||
}
|
|
||||||
|
|
||||||
// Creates a new directory (name) at a specified path (p) for the server.
|
|
||||||
func (fs *Filesystem) CreateDirectory(name string, p string) error {
|
|
||||||
cleaned, err := fs.SafePath(path.Join(p, name))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return os.MkdirAll(cleaned, 0o755)
|
// Return any remaining error.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rename moves (or renames) a file or directory.
|
// CreateDirectory creates a new directory (name) at a specified path (p) for
|
||||||
func (fs *Filesystem) Rename(from string, to string) error {
|
// the server.
|
||||||
cleanedFrom, err := fs.SafePath(from)
|
func (fs *Filesystem) CreateDirectory(name string, p string) error {
|
||||||
if err != nil {
|
return fs.unixFS.MkdirAll(filepath.Join(p, name), 0o755)
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cleanedTo, err := fs.SafePath(to)
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the target file or directory already exists the rename function will fail, so just
|
|
||||||
// bail out now.
|
|
||||||
if _, err := os.Stat(cleanedTo); err == nil {
|
|
||||||
return os.ErrExist
|
|
||||||
}
|
|
||||||
|
|
||||||
if cleanedTo == fs.Path() {
|
|
||||||
return errors.New("attempting to rename into an invalid directory space")
|
|
||||||
}
|
|
||||||
|
|
||||||
d := strings.TrimSuffix(cleanedTo, path.Base(cleanedTo))
|
|
||||||
// Ensure that the directory we're moving into exists correctly on the system. Only do this if
|
|
||||||
// we're not at the root directory level.
|
|
||||||
if d != fs.Path() {
|
|
||||||
if mkerr := os.MkdirAll(d, 0o755); mkerr != nil {
|
|
||||||
return errors.WithMessage(mkerr, "failed to create directory structure for file rename")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Rename(cleanedFrom, cleanedTo); err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Recursively iterates over a file or directory and sets the permissions on all of the
|
func (fs *Filesystem) Rename(oldpath, newpath string) error {
|
||||||
|
return fs.unixFS.Rename(oldpath, newpath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) Symlink(oldpath, newpath string) error {
|
||||||
|
return fs.unixFS.Symlink(oldpath, newpath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) chownFile(name string) error {
|
||||||
|
if fs.isTest {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
uid := config.Get().System.User.Uid
|
||||||
|
gid := config.Get().System.User.Gid
|
||||||
|
return fs.unixFS.Lchown(name, uid, gid)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chown recursively iterates over a file or directory and sets the permissions on all of the
|
||||||
// underlying files. Iterate over all of the files and directories. If it is a file just
|
// underlying files. Iterate over all of the files and directories. If it is a file just
|
||||||
// go ahead and perform the chown operation. Otherwise dig deeper into the directory until
|
// go ahead and perform the chown operation. Otherwise dig deeper into the directory until
|
||||||
// we've run out of directories to dig into.
|
// we've run out of directories to dig into.
|
||||||
func (fs *Filesystem) Chown(path string) error {
|
func (fs *Filesystem) Chown(p string) error {
|
||||||
cleaned, err := fs.SafePath(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return fs.unsafeChown(cleaned)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unsafeChown chowns the given path, without checking if the path is safe. This should only be used
|
|
||||||
// when the path has already been checked.
|
|
||||||
func (fs *Filesystem) unsafeChown(path string) error {
|
|
||||||
if fs.isTest {
|
if fs.isTest {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -236,54 +222,44 @@ func (fs *Filesystem) unsafeChown(path string) error {
|
|||||||
uid := config.Get().System.User.Uid
|
uid := config.Get().System.User.Uid
|
||||||
gid := config.Get().System.User.Gid
|
gid := config.Get().System.User.Gid
|
||||||
|
|
||||||
|
dirfd, name, closeFd, err := fs.unixFS.SafePath(p)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// Start by just chowning the initial path that we received.
|
// Start by just chowning the initial path that we received.
|
||||||
if err := os.Chown(path, uid, gid); err != nil {
|
if err := fs.unixFS.Lchownat(dirfd, name, uid, gid); err != nil {
|
||||||
return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
|
return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this is not a directory we can now return from the function, there is nothing
|
// If this is not a directory we can now return from the function, there is nothing
|
||||||
// left that we need to do.
|
// left that we need to do.
|
||||||
if st, err := os.Stat(path); err != nil || !st.IsDir() {
|
if st, err := fs.unixFS.Lstatat(dirfd, name); err != nil || !st.IsDir() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this was a directory, begin walking over its contents recursively and ensure that all
|
// This walker is probably some of the most efficient code in Wings. It has
|
||||||
// of the subfiles and directories get their permissions updated as well.
|
// an internally re-used buffer for listing directory entries and doesn't
|
||||||
err := godirwalk.Walk(path, &godirwalk.Options{
|
// need to check if every individual path it touches is safe as the code
|
||||||
Unsorted: true,
|
// doesn't traverse symlinks, is immune to symlink timing attacks, and
|
||||||
Callback: func(p string, e *godirwalk.Dirent) error {
|
// gives us a dirfd and file name to make a direct syscall with.
|
||||||
// Do not attempt to chown a symlink. Go's os.Chown function will affect the symlink
|
if err := fs.unixFS.WalkDirat(dirfd, name, func(dirfd int, name, _ string, info ufs.DirEntry, err error) error {
|
||||||
// so if it points to a location outside the data directory the user would be able to
|
if err != nil {
|
||||||
// (un)intentionally modify that files permissions.
|
return err
|
||||||
if e.IsSymlink() {
|
}
|
||||||
if e.IsDir() {
|
if err := fs.unixFS.Lchownat(dirfd, name, uid, gid); err != nil {
|
||||||
return godirwalk.SkipThis
|
return err
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
return nil
|
}); err != nil {
|
||||||
}
|
return fmt.Errorf("server/filesystem: chown: failed to chown during walk function: %w", err)
|
||||||
|
}
|
||||||
return os.Chown(p, uid, gid)
|
return nil
|
||||||
},
|
|
||||||
})
|
|
||||||
return errors.Wrap(err, "server/filesystem: chown: failed to chown during walk function")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
|
func (fs *Filesystem) Chmod(path string, mode ufs.FileMode) error {
|
||||||
cleaned, err := fs.SafePath(path)
|
return fs.unixFS.Chmod(path, mode)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if fs.isTest {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Chmod(cleaned, mode); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Begin looping up to 50 times to try and create a unique copy file name. This will take
|
// Begin looping up to 50 times to try and create a unique copy file name. This will take
|
||||||
@ -294,7 +270,7 @@ func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
|
|||||||
// Could probably make this more efficient by checking if there are any files matching the copy
|
// Could probably make this more efficient by checking if there are any files matching the copy
|
||||||
// pattern, and trying to find the highest number and then incrementing it by one rather than
|
// pattern, and trying to find the highest number and then incrementing it by one rather than
|
||||||
// looping endlessly.
|
// looping endlessly.
|
||||||
func (fs *Filesystem) findCopySuffix(dir string, name string, extension string) (string, error) {
|
func (fs *Filesystem) findCopySuffix(dirfd int, name, extension string) (string, error) {
|
||||||
var i int
|
var i int
|
||||||
suffix := " copy"
|
suffix := " copy"
|
||||||
|
|
||||||
@ -306,11 +282,10 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
|
|||||||
n := name + suffix + extension
|
n := name + suffix + extension
|
||||||
// If we stat the file and it does not exist that means we're good to create the copy. If it
|
// If we stat the file and it does not exist that means we're good to create the copy. If it
|
||||||
// does exist, we'll just continue to the next loop and try again.
|
// does exist, we'll just continue to the next loop and try again.
|
||||||
if _, err := fs.Stat(path.Join(dir, n)); err != nil {
|
if _, err := fs.unixFS.Lstatat(dirfd, n); err != nil {
|
||||||
if !errors.Is(err, os.ErrNotExist) {
|
if !errors.Is(err, ufs.ErrNotExist) {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -322,53 +297,68 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
|
|||||||
return name + suffix + extension, nil
|
return name + suffix + extension, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copies a given file to the same location and appends a suffix to the file to indicate that
|
// Copy copies a given file to the same location and appends a suffix to the
|
||||||
// it has been copied.
|
// file to indicate that it has been copied.
|
||||||
func (fs *Filesystem) Copy(p string) error {
|
func (fs *Filesystem) Copy(p string) error {
|
||||||
cleaned, err := fs.SafePath(p)
|
dirfd, name, closeFd, err := fs.unixFS.SafePath(p)
|
||||||
|
defer closeFd()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
source, err := fs.unixFS.OpenFileat(dirfd, name, ufs.O_RDONLY, 0)
|
||||||
s, err := os.Stat(cleaned)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if s.IsDir() || !s.Mode().IsRegular() {
|
|
||||||
// If this is a directory or not a regular file, just throw a not-exist error
|
|
||||||
// since anything calling this function should understand what that means.
|
|
||||||
return os.ErrNotExist
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check that copying this file wouldn't put the server over its limit.
|
|
||||||
if err := fs.HasSpaceFor(s.Size()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
base := filepath.Base(cleaned)
|
|
||||||
relative := strings.TrimSuffix(strings.TrimPrefix(cleaned, fs.Path()), base)
|
|
||||||
extension := filepath.Ext(base)
|
|
||||||
name := strings.TrimSuffix(base, extension)
|
|
||||||
|
|
||||||
// Ensure that ".tar" is also counted as apart of the file extension.
|
|
||||||
// There might be a better way to handle this for other double file extensions,
|
|
||||||
// but this is a good workaround for now.
|
|
||||||
if strings.HasSuffix(name, ".tar") {
|
|
||||||
extension = ".tar" + extension
|
|
||||||
name = strings.TrimSuffix(name, ".tar")
|
|
||||||
}
|
|
||||||
|
|
||||||
source, err := os.Open(cleaned)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer source.Close()
|
defer source.Close()
|
||||||
|
info, err := source.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if info.IsDir() || !info.Mode().IsRegular() {
|
||||||
|
// If this is a directory or not a regular file, just throw a not-exist error
|
||||||
|
// since anything calling this function should understand what that means.
|
||||||
|
return ufs.ErrNotExist
|
||||||
|
}
|
||||||
|
currentSize := info.Size()
|
||||||
|
|
||||||
n, err := fs.findCopySuffix(relative, name, extension)
|
// Check that copying this file wouldn't put the server over its limit.
|
||||||
|
if err := fs.HasSpaceFor(currentSize); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
base := info.Name()
|
||||||
|
extension := filepath.Ext(base)
|
||||||
|
baseName := strings.TrimSuffix(base, extension)
|
||||||
|
|
||||||
|
// Ensure that ".tar" is also counted as apart of the file extension.
|
||||||
|
// There might be a better way to handle this for other double file extensions,
|
||||||
|
// but this is a good workaround for now.
|
||||||
|
if strings.HasSuffix(baseName, ".tar") {
|
||||||
|
extension = ".tar" + extension
|
||||||
|
baseName = strings.TrimSuffix(baseName, ".tar")
|
||||||
|
}
|
||||||
|
|
||||||
|
newName, err := fs.findCopySuffix(dirfd, baseName, extension)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dst, err := fs.unixFS.OpenFileat(dirfd, newName, ufs.O_WRONLY|ufs.O_CREATE, info.Mode())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs.Writefile(path.Join(relative, n), source)
|
// Do not use CopyBuffer here, it is wasteful as the file implements
|
||||||
|
// io.ReaderFrom, which causes it to not use the buffer anyways.
|
||||||
|
n, err := io.Copy(dst, io.LimitReader(source, currentSize))
|
||||||
|
fs.unixFS.Add(n)
|
||||||
|
|
||||||
|
if !fs.isTest {
|
||||||
|
if err := fs.unixFS.Lchownat(dirfd, newName, config.Get().System.User.Uid, config.Get().System.User.Gid); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Return the error from io.Copy.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// TruncateRootDirectory removes _all_ files and directories from a server's
|
// TruncateRootDirectory removes _all_ files and directories from a server's
|
||||||
@ -380,211 +370,128 @@ func (fs *Filesystem) TruncateRootDirectory() error {
|
|||||||
if err := os.Mkdir(fs.Path(), 0o755); err != nil {
|
if err := os.Mkdir(fs.Path(), 0o755); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
_ = fs.unixFS.Close()
|
||||||
|
unixFS, err := ufs.NewUnixFS(fs.Path(), config.UseOpenat2())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var limit int64
|
||||||
|
if fs.isTest {
|
||||||
|
limit = 0
|
||||||
|
} else {
|
||||||
|
limit = fs.unixFS.Limit()
|
||||||
|
}
|
||||||
|
fs.unixFS = ufs.NewQuota(unixFS, limit)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete removes a file or folder from the system. Prevents the user from
|
// Delete removes a file or folder from the system. Prevents the user from
|
||||||
// accidentally (or maliciously) removing their root server data directory.
|
// accidentally (or maliciously) removing their root server data directory.
|
||||||
func (fs *Filesystem) Delete(p string) error {
|
func (fs *Filesystem) Delete(p string) error {
|
||||||
// This is one of the few (only?) places in the codebase where we're explicitly not using
|
return fs.unixFS.RemoveAll(p)
|
||||||
// the SafePath functionality when working with user provided input. If we did, you would
|
|
||||||
// not be able to delete a file that is a symlink pointing to a location outside the data
|
|
||||||
// directory.
|
|
||||||
//
|
|
||||||
// We also want to avoid resolving a symlink that points _within_ the data directory and thus
|
|
||||||
// deleting the actual source file for the symlink rather than the symlink itself. For these
|
|
||||||
// purposes just resolve the actual file path using filepath.Join() and confirm that the path
|
|
||||||
// exists within the data directory.
|
|
||||||
resolved := fs.unsafeFilePath(p)
|
|
||||||
if !fs.unsafeIsInDataDirectory(resolved) {
|
|
||||||
return NewBadPathResolution(p, resolved)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Block any whoopsies.
|
|
||||||
if resolved == fs.Path() {
|
|
||||||
return errors.New("cannot delete root server directory")
|
|
||||||
}
|
|
||||||
|
|
||||||
st, err := os.Lstat(resolved)
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
fs.error(err).Warn("error while attempting to stat file before deletion")
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// The following logic is used to handle a case where a user attempts to
|
|
||||||
// delete a file that does not exist through a directory symlink.
|
|
||||||
// We don't want to reveal that the file does not exist, so we validate
|
|
||||||
// the path of the symlink and return a bad path error if it is invalid.
|
|
||||||
|
|
||||||
// The requested file or directory doesn't exist, so at this point we
|
|
||||||
// need to iterate up the path chain until we hit a directory that
|
|
||||||
// _does_ exist and can be validated.
|
|
||||||
parts := strings.Split(filepath.Dir(resolved), "/")
|
|
||||||
|
|
||||||
// Range over all the path parts and form directory paths from the end
|
|
||||||
// moving up until we have a valid resolution, or we run out of paths to
|
|
||||||
// try.
|
|
||||||
for k := range parts {
|
|
||||||
try := strings.Join(parts[:(len(parts)-k)], "/")
|
|
||||||
if !fs.unsafeIsInDataDirectory(try) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
t, err := filepath.EvalSymlinks(try)
|
|
||||||
if err == nil {
|
|
||||||
if !fs.unsafeIsInDataDirectory(t) {
|
|
||||||
return NewBadPathResolution(p, t)
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Always return early if the file does not exist.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the file is not a symlink, we need to check that it is not within a
|
|
||||||
// symlinked directory that points outside the data directory.
|
|
||||||
if st.Mode()&os.ModeSymlink == 0 {
|
|
||||||
ep, err := filepath.EvalSymlinks(resolved)
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else if !fs.unsafeIsInDataDirectory(ep) {
|
|
||||||
return NewBadPathResolution(p, ep)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if st.IsDir() {
|
|
||||||
if s, err := fs.DirectorySize(resolved); err == nil {
|
|
||||||
fs.addDisk(-s)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fs.addDisk(-st.Size())
|
|
||||||
}
|
|
||||||
|
|
||||||
return os.RemoveAll(resolved)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type fileOpener struct {
|
//type fileOpener struct {
|
||||||
busy uint
|
// fs *Filesystem
|
||||||
}
|
// busy uint
|
||||||
|
//}
|
||||||
// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
|
//
|
||||||
// cannot be opened because of a "text file busy" error, we will attempt until the number of attempts
|
//// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
|
||||||
// has been exhaused, at which point we will abort with an error.
|
//// cannot be opened because of a "text file busy" error, we will attempt until the number of attempts
|
||||||
func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File, error) {
|
//// has been exhaused, at which point we will abort with an error.
|
||||||
for {
|
//func (fo *fileOpener) open(path string, flags int, perm ufs.FileMode) (ufs.File, error) {
|
||||||
f, err := os.OpenFile(path, flags, perm)
|
// for {
|
||||||
|
// f, err := fo.fs.unixFS.OpenFile(path, flags, perm)
|
||||||
// If there is an error because the text file is busy, go ahead and sleep for a few
|
//
|
||||||
// hundred milliseconds and then try again up to three times before just returning the
|
// // If there is an error because the text file is busy, go ahead and sleep for a few
|
||||||
// error back to the caller.
|
// // hundred milliseconds and then try again up to three times before just returning the
|
||||||
//
|
// // error back to the caller.
|
||||||
// Based on code from: https://github.com/golang/go/issues/22220#issuecomment-336458122
|
// //
|
||||||
if err != nil && fo.busy < 3 && strings.Contains(err.Error(), "text file busy") {
|
// // Based on code from: https://github.com/golang/go/issues/22220#issuecomment-336458122
|
||||||
time.Sleep(100 * time.Millisecond << fo.busy)
|
// if err != nil && fo.busy < 3 && strings.Contains(err.Error(), "text file busy") {
|
||||||
fo.busy++
|
// time.Sleep(100 * time.Millisecond << fo.busy)
|
||||||
continue
|
// fo.busy++
|
||||||
}
|
// continue
|
||||||
|
// }
|
||||||
return f, err
|
//
|
||||||
}
|
// return f, err
|
||||||
}
|
// }
|
||||||
|
//}
|
||||||
|
|
||||||
// ListDirectory lists the contents of a given directory and returns stat
|
// ListDirectory lists the contents of a given directory and returns stat
|
||||||
// information about each file and folder within it.
|
// information about each file and folder within it.
|
||||||
func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
|
func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
|
||||||
cleaned, err := fs.SafePath(p)
|
// Read entries from the path on the filesystem, using the mapped reader, so
|
||||||
if err != nil {
|
// we can map the DirEntry slice into a Stat slice with mimetype information.
|
||||||
return nil, err
|
out, err := ufs.ReadDirMap(fs.unixFS.UnixFS, p, func(e ufs.DirEntry) (Stat, error) {
|
||||||
}
|
info, err := e.Info()
|
||||||
|
if err != nil {
|
||||||
files, err := ioutil.ReadDir(cleaned)
|
return Stat{}, err
|
||||||
if err != nil {
|
}
|
||||||
return nil, err
|
|
||||||
}
|
var d string
|
||||||
|
if e.Type().IsDir() {
|
||||||
var wg sync.WaitGroup
|
d = "inode/directory"
|
||||||
|
} else {
|
||||||
// You must initialize the output of this directory as a non-nil value otherwise
|
d = "application/octet-stream"
|
||||||
// when it is marshaled into a JSON object you'll just get 'null' back, which will
|
}
|
||||||
// break the panel badly.
|
var m *mimetype.MIME
|
||||||
out := make([]Stat, len(files))
|
if e.Type().IsRegular() {
|
||||||
|
// TODO: I should probably find a better way to do this.
|
||||||
// Iterate over all of the files and directories returned and perform an async process
|
eO := e.(interface {
|
||||||
// to get the mime-type for them all.
|
Open() (ufs.File, error)
|
||||||
for i, file := range files {
|
})
|
||||||
wg.Add(1)
|
f, err := eO.Open()
|
||||||
|
if err != nil {
|
||||||
go func(idx int, f os.FileInfo) {
|
return Stat{}, err
|
||||||
defer wg.Done()
|
}
|
||||||
|
m, err = mimetype.DetectReader(f)
|
||||||
var m *mimetype.MIME
|
if err != nil {
|
||||||
d := "inode/directory"
|
log.Error(err.Error())
|
||||||
if !f.IsDir() {
|
}
|
||||||
cleanedp := filepath.Join(cleaned, f.Name())
|
_ = f.Close()
|
||||||
if f.Mode()&os.ModeSymlink != 0 {
|
}
|
||||||
cleanedp, _ = fs.SafePath(filepath.Join(cleaned, f.Name()))
|
|
||||||
}
|
st := Stat{FileInfo: info, Mimetype: d}
|
||||||
|
if m != nil {
|
||||||
// Don't try to detect the type on a pipe — this will just hang the application and
|
st.Mimetype = m.String()
|
||||||
// you'll never get a response back.
|
}
|
||||||
//
|
return st, nil
|
||||||
// @see https://github.com/pterodactyl/panel/issues/4059
|
})
|
||||||
if cleanedp != "" && f.Mode()&os.ModeNamedPipe == 0 {
|
if err != nil {
|
||||||
m, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
|
return nil, err
|
||||||
} else {
|
}
|
||||||
// Just pass this for an unknown type because the file could not safely be resolved within
|
|
||||||
// the server data path.
|
// Sort entries alphabetically.
|
||||||
d = "application/octet-stream"
|
slices.SortStableFunc(out, func(a, b Stat) int {
|
||||||
}
|
switch {
|
||||||
}
|
case a.Name() == b.Name():
|
||||||
|
return 0
|
||||||
st := Stat{FileInfo: f, Mimetype: d}
|
case a.Name() > b.Name():
|
||||||
if m != nil {
|
return 1
|
||||||
st.Mimetype = m.String()
|
default:
|
||||||
}
|
return -1
|
||||||
out[idx] = st
|
|
||||||
}(i, file)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Sort the output alphabetically to begin with since we've run the output
|
|
||||||
// through an asynchronous process and the order is gonna be very random.
|
|
||||||
sort.SliceStable(out, func(i, j int) bool {
|
|
||||||
if out[i].Name() == out[j].Name() || out[i].Name() > out[j].Name() {
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
return false
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// Then, sort it so that directories are listed first in the output. Everything
|
// Sort folders before other file types.
|
||||||
// will continue to be alphabetized at this point.
|
slices.SortStableFunc(out, func(a, b Stat) int {
|
||||||
sort.SliceStable(out, func(i, j int) bool {
|
switch {
|
||||||
return out[i].IsDir()
|
case a.IsDir() && b.IsDir():
|
||||||
|
return 0
|
||||||
|
case a.IsDir():
|
||||||
|
return -1
|
||||||
|
default:
|
||||||
|
return 1
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fs *Filesystem) Chtimes(path string, atime, mtime time.Time) error {
|
func (fs *Filesystem) Chtimes(path string, atime, mtime time.Time) error {
|
||||||
cleaned, err := fs.SafePath(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if fs.isTest {
|
if fs.isTest {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
return fs.unixFS.Chtimes(path, atime, mtime)
|
||||||
if err := os.Chtimes(cleaned, atime, mtime); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|||||||
@ -7,12 +7,13 @@ import (
|
|||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
"testing"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -28,15 +29,23 @@ func NewFs() (*Filesystem, *rootFs) {
|
|||||||
tmpDir, err := os.MkdirTemp(os.TempDir(), "pterodactyl")
|
tmpDir, err := os.MkdirTemp(os.TempDir(), "pterodactyl")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
// defer os.RemoveAll(tmpDir)
|
|
||||||
|
|
||||||
rfs := rootFs{root: tmpDir}
|
rfs := rootFs{root: tmpDir}
|
||||||
|
|
||||||
rfs.reset()
|
p := filepath.Join(tmpDir, "server")
|
||||||
|
if err := os.Mkdir(p, 0o755); err != nil {
|
||||||
|
panic(err)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
fs := New(filepath.Join(tmpDir, "/server"), 0, []string{})
|
fs, _ := New(p, 0, []string{})
|
||||||
fs.isTest = true
|
fs.isTest = true
|
||||||
|
if err := fs.TruncateRootDirectory(); err != nil {
|
||||||
|
panic(err)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
return fs, &rfs
|
return fs, &rfs
|
||||||
}
|
}
|
||||||
@ -45,7 +54,7 @@ type rootFs struct {
|
|||||||
root string
|
root string
|
||||||
}
|
}
|
||||||
|
|
||||||
func getFileContent(file *os.File) string {
|
func getFileContent(file ufs.File) string {
|
||||||
var w bytes.Buffer
|
var w bytes.Buffer
|
||||||
if _, err := bufio.NewReader(file).WriteTo(&w); err != nil {
|
if _, err := bufio.NewReader(file).WriteTo(&w); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
@ -54,11 +63,11 @@ func getFileContent(file *os.File) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
|
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
|
||||||
f, err := os.Create(filepath.Join(rfs.root, "/server", p))
|
f, err := os.Create(filepath.Join(rfs.root, "server", p))
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
f.Write(c)
|
_, _ = f.Write(c)
|
||||||
f.Close()
|
_ = f.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
@ -69,19 +78,7 @@ func (rfs *rootFs) CreateServerFileFromString(p string, c string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (rfs *rootFs) StatServerFile(p string) (os.FileInfo, error) {
|
func (rfs *rootFs) StatServerFile(p string) (os.FileInfo, error) {
|
||||||
return os.Stat(filepath.Join(rfs.root, "/server", p))
|
return os.Stat(filepath.Join(rfs.root, "server", p))
|
||||||
}
|
|
||||||
|
|
||||||
func (rfs *rootFs) reset() {
|
|
||||||
if err := os.RemoveAll(filepath.Join(rfs.root, "/server")); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Mkdir(filepath.Join(rfs.root, "/server"), 0o755); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilesystem_Openfile(t *testing.T) {
|
func TestFilesystem_Openfile(t *testing.T) {
|
||||||
@ -93,7 +90,8 @@ func TestFilesystem_Openfile(t *testing.T) {
|
|||||||
_, _, err := fs.File("foo/bar.txt")
|
_, _, err := fs.File("foo/bar.txt")
|
||||||
|
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrNotExist)).IsTrue()
|
// TODO
|
||||||
|
//g.Assert(IsErrorCode(err, ErrNotExist)).IsTrue()
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("returns file stat information", func() {
|
g.It("returns file stat information", func() {
|
||||||
@ -108,14 +106,14 @@ func TestFilesystem_Openfile(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilesystem_Writefile(t *testing.T) {
|
func TestFilesystem_Writefile(t *testing.T) {
|
||||||
g := Goblin(t)
|
g := Goblin(t)
|
||||||
fs, rfs := NewFs()
|
fs, _ := NewFs()
|
||||||
|
|
||||||
g.Describe("Open and WriteFile", func() {
|
g.Describe("Open and WriteFile", func() {
|
||||||
buf := &bytes.Buffer{}
|
buf := &bytes.Buffer{}
|
||||||
@ -125,22 +123,22 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
g.It("can create a new file", func() {
|
g.It("can create a new file", func() {
|
||||||
r := bytes.NewReader([]byte("test file content"))
|
r := bytes.NewReader([]byte("test file content"))
|
||||||
|
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
|
g.Assert(fs.CachedUsage()).Equal(int64(0))
|
||||||
|
|
||||||
err := fs.Writefile("test.txt", r)
|
err := fs.Write("test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
f, _, err := fs.File("test.txt")
|
f, _, err := fs.File("test.txt")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
g.Assert(getFileContent(f)).Equal("test file content")
|
g.Assert(getFileContent(f)).Equal("test file content")
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(r.Size())
|
g.Assert(fs.CachedUsage()).Equal(r.Size())
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("can create a new file inside a nested directory with leading slash", func() {
|
g.It("can create a new file inside a nested directory with leading slash", func() {
|
||||||
r := bytes.NewReader([]byte("test file content"))
|
r := bytes.NewReader([]byte("test file content"))
|
||||||
|
|
||||||
err := fs.Writefile("/some/nested/test.txt", r)
|
err := fs.Write("/some/nested/test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
f, _, err := fs.File("/some/nested/test.txt")
|
f, _, err := fs.File("/some/nested/test.txt")
|
||||||
@ -152,7 +150,7 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
g.It("can create a new file inside a nested directory without a trailing slash", func() {
|
g.It("can create a new file inside a nested directory without a trailing slash", func() {
|
||||||
r := bytes.NewReader([]byte("test file content"))
|
r := bytes.NewReader([]byte("test file content"))
|
||||||
|
|
||||||
err := fs.Writefile("some/../foo/bar/test.txt", r)
|
err := fs.Write("some/../foo/bar/test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
f, _, err := fs.File("foo/bar/test.txt")
|
f, _, err := fs.File("foo/bar/test.txt")
|
||||||
@ -164,13 +162,13 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
g.It("cannot create a file outside the root directory", func() {
|
g.It("cannot create a file outside the root directory", func() {
|
||||||
r := bytes.NewReader([]byte("test file content"))
|
r := bytes.NewReader([]byte("test file content"))
|
||||||
|
|
||||||
err := fs.Writefile("/some/../foo/../../test.txt", r)
|
err := fs.Write("/some/../foo/../../test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot write a file that exceeds the disk limits", func() {
|
g.It("cannot write a file that exceeds the disk limits", func() {
|
||||||
atomic.StoreInt64(&fs.diskLimit, 1024)
|
fs.SetDiskLimit(1024)
|
||||||
|
|
||||||
b := make([]byte, 1025)
|
b := make([]byte, 1025)
|
||||||
_, err := rand.Read(b)
|
_, err := rand.Read(b)
|
||||||
@ -178,18 +176,18 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
g.Assert(len(b)).Equal(1025)
|
g.Assert(len(b)).Equal(1025)
|
||||||
|
|
||||||
r := bytes.NewReader(b)
|
r := bytes.NewReader(b)
|
||||||
err = fs.Writefile("test.txt", r)
|
err = fs.Write("test.txt", r, int64(len(b)), 0o644)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("truncates the file when writing new contents", func() {
|
g.It("truncates the file when writing new contents", func() {
|
||||||
r := bytes.NewReader([]byte("original data"))
|
r := bytes.NewReader([]byte("original data"))
|
||||||
err := fs.Writefile("test.txt", r)
|
err := fs.Write("test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
r = bytes.NewReader([]byte("new data"))
|
r = bytes.NewReader([]byte("new data"))
|
||||||
err = fs.Writefile("test.txt", r)
|
err = fs.Write("test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
f, _, err := fs.File("test.txt")
|
f, _, err := fs.File("test.txt")
|
||||||
@ -200,10 +198,7 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
buf.Truncate(0)
|
buf.Truncate(0)
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
|
||||||
atomic.StoreInt64(&fs.diskLimit, 0)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -236,17 +231,17 @@ func TestFilesystem_CreateDirectory(t *testing.T) {
|
|||||||
g.It("should not allow the creation of directories outside the root", func() {
|
g.It("should not allow the creation of directories outside the root", func() {
|
||||||
err := fs.CreateDirectory("test", "e/../../something")
|
err := fs.CreateDirectory("test", "e/../../something")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should not increment the disk usage", func() {
|
g.It("should not increment the disk usage", func() {
|
||||||
err := fs.CreateDirectory("test", "/")
|
err := fs.CreateDirectory("test", "/")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
|
g.Assert(fs.CachedUsage()).Equal(int64(0))
|
||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -268,25 +263,25 @@ func TestFilesystem_Rename(t *testing.T) {
|
|||||||
|
|
||||||
err = fs.Rename("source.txt", "target.txt")
|
err = fs.Rename("source.txt", "target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrExist)).IsTrue("err is not ErrExist")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("returns an error if the final destination is the root directory", func() {
|
g.It("returns an error if the final destination is the root directory", func() {
|
||||||
err := fs.Rename("source.txt", "/")
|
err := fs.Rename("source.txt", "/")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("returns an error if the source destination is the root directory", func() {
|
g.It("returns an error if the source destination is the root directory", func() {
|
||||||
err := fs.Rename("source.txt", "/")
|
err := fs.Rename("/", "target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not allow renaming to a location outside the root", func() {
|
g.It("does not allow renaming to a location outside the root", func() {
|
||||||
err := fs.Rename("source.txt", "../target.txt")
|
err := fs.Rename("source.txt", "../target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not allow renaming from a location outside the root", func() {
|
g.It("does not allow renaming from a location outside the root", func() {
|
||||||
@ -294,7 +289,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
|||||||
|
|
||||||
err = fs.Rename("/../ext-source.txt", "target.txt")
|
err = fs.Rename("/../ext-source.txt", "target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("allows a file to be renamed", func() {
|
g.It("allows a file to be renamed", func() {
|
||||||
@ -303,7 +298,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
|||||||
|
|
||||||
_, err = rfs.StatServerFile("source.txt")
|
_, err = rfs.StatServerFile("source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
|
|
||||||
st, err := rfs.StatServerFile("target.txt")
|
st, err := rfs.StatServerFile("target.txt")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
@ -320,7 +315,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
|||||||
|
|
||||||
_, err = rfs.StatServerFile("source_dir")
|
_, err = rfs.StatServerFile("source_dir")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
|
|
||||||
st, err := rfs.StatServerFile("target_dir")
|
st, err := rfs.StatServerFile("target_dir")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
@ -330,7 +325,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
|||||||
g.It("returns an error if the source does not exist", func() {
|
g.It("returns an error if the source does not exist", func() {
|
||||||
err := fs.Rename("missing.txt", "target.txt")
|
err := fs.Rename("missing.txt", "target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("creates directories if they are missing", func() {
|
g.It("creates directories if they are missing", func() {
|
||||||
@ -343,7 +338,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -358,13 +353,13 @@ func TestFilesystem_Copy(t *testing.T) {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")))
|
fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content")))
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if the source does not exist", func() {
|
g.It("should return an error if the source does not exist", func() {
|
||||||
err := fs.Copy("foo.txt")
|
err := fs.Copy("foo.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if the source is outside the root", func() {
|
g.It("should return an error if the source is outside the root", func() {
|
||||||
@ -372,11 +367,11 @@ func TestFilesystem_Copy(t *testing.T) {
|
|||||||
|
|
||||||
err = fs.Copy("../ext-source.txt")
|
err = fs.Copy("../ext-source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if the source directory is outside the root", func() {
|
g.It("should return an error if the source directory is outside the root", func() {
|
||||||
err := os.MkdirAll(filepath.Join(rfs.root, "/nested/in/dir"), 0o755)
|
err := os.MkdirAll(filepath.Join(rfs.root, "nested/in/dir"), 0o755)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = rfs.CreateServerFileFromString("/../nested/in/dir/ext-source.txt", "external content")
|
err = rfs.CreateServerFileFromString("/../nested/in/dir/ext-source.txt", "external content")
|
||||||
@ -384,28 +379,28 @@ func TestFilesystem_Copy(t *testing.T) {
|
|||||||
|
|
||||||
err = fs.Copy("../nested/in/dir/ext-source.txt")
|
err = fs.Copy("../nested/in/dir/ext-source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
|
|
||||||
err = fs.Copy("nested/in/../../../nested/in/dir/ext-source.txt")
|
err = fs.Copy("nested/in/../../../nested/in/dir/ext-source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if the source is a directory", func() {
|
g.It("should return an error if the source is a directory", func() {
|
||||||
err := os.Mkdir(filepath.Join(rfs.root, "/server/dir"), 0o755)
|
err := os.Mkdir(filepath.Join(rfs.root, "server/dir"), 0o755)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Copy("dir")
|
err = fs.Copy("dir")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if there is not space to copy the file", func() {
|
g.It("should return an error if there is not space to copy the file", func() {
|
||||||
atomic.StoreInt64(&fs.diskLimit, 2)
|
fs.SetDiskLimit(2)
|
||||||
|
|
||||||
err := fs.Copy("source.txt")
|
err := fs.Copy("source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue("err is not ErrCodeDiskSpace")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should create a copy of the file and increment the disk used", func() {
|
g.It("should create a copy of the file and increment the disk used", func() {
|
||||||
@ -433,7 +428,7 @@ func TestFilesystem_Copy(t *testing.T) {
|
|||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
}
|
}
|
||||||
|
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(utf8.RuneCountInString("test content")) * 3)
|
g.Assert(fs.CachedUsage()).Equal(int64(utf8.RuneCountInString("test content")) * 3)
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should create a copy inside of a directory", func() {
|
g.It("should create a copy inside of a directory", func() {
|
||||||
@ -454,10 +449,7 @@ func TestFilesystem_Copy(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
|
||||||
atomic.StoreInt64(&fs.diskLimit, 0)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -472,7 +464,7 @@ func TestFilesystem_Delete(t *testing.T) {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")))
|
fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content")))
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not delete files outside the root directory", func() {
|
g.It("does not delete files outside the root directory", func() {
|
||||||
@ -480,13 +472,13 @@ func TestFilesystem_Delete(t *testing.T) {
|
|||||||
|
|
||||||
err = fs.Delete("../ext-source.txt")
|
err = fs.Delete("../ext-source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not allow the deletion of the root directory", func() {
|
g.It("does not allow the deletion of the root directory", func() {
|
||||||
err := fs.Delete("/")
|
err := fs.Delete("/")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(err.Error()).Equal("cannot delete root server directory")
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not return an error if the target does not exist", func() {
|
g.It("does not return an error if the target does not exist", func() {
|
||||||
@ -504,9 +496,9 @@ func TestFilesystem_Delete(t *testing.T) {
|
|||||||
|
|
||||||
_, err = rfs.StatServerFile("source.txt")
|
_, err = rfs.StatServerFile("source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
|
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
|
g.Assert(fs.CachedUsage()).Equal(int64(0))
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("deletes all items inside a directory if the directory is deleted", func() {
|
g.It("deletes all items inside a directory if the directory is deleted", func() {
|
||||||
@ -524,16 +516,16 @@ func TestFilesystem_Delete(t *testing.T) {
|
|||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")*3))
|
fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content") * 3))
|
||||||
|
|
||||||
err = fs.Delete("foo")
|
err = fs.Delete("foo")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
|
g.Assert(fs.unixFS.Usage()).Equal(int64(0))
|
||||||
|
|
||||||
for _, s := range sources {
|
for _, s := range sources {
|
||||||
_, err = rfs.StatServerFile(s)
|
_, err = rfs.StatServerFile(s)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -589,7 +581,7 @@ func TestFilesystem_Delete(t *testing.T) {
|
|||||||
// Delete a file inside the symlinked directory.
|
// Delete a file inside the symlinked directory.
|
||||||
err = fs.Delete("symlink/source.txt")
|
err = fs.Delete("symlink/source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
|
|
||||||
// Ensure the file outside the root directory still exists.
|
// Ensure the file outside the root directory still exists.
|
||||||
_, err = os.Lstat(filepath.Join(rfs.root, "foo/source.txt"))
|
_, err = os.Lstat(filepath.Join(rfs.root, "foo/source.txt"))
|
||||||
@ -608,14 +600,11 @@ func TestFilesystem_Delete(t *testing.T) {
|
|||||||
// Delete a file inside the symlinked directory.
|
// Delete a file inside the symlinked directory.
|
||||||
err = fs.Delete("symlink/source.txt")
|
err = fs.Delete("symlink/source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
|
||||||
atomic.StoreInt64(&fs.diskLimit, 0)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,71 +1,28 @@
|
|||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
iofs "io/fs"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Checks if the given file or path is in the server's file denylist. If so, an Error
|
// Checks if the given file or path is in the server's file denylist. If so, an Error
|
||||||
// is returned, otherwise nil is returned.
|
// is returned, otherwise nil is returned.
|
||||||
func (fs *Filesystem) IsIgnored(paths ...string) error {
|
func (fs *Filesystem) IsIgnored(paths ...string) error {
|
||||||
for _, p := range paths {
|
for _, p := range paths {
|
||||||
sp, err := fs.SafePath(p)
|
//sp, err := fs.SafePath(p)
|
||||||
if err != nil {
|
//if err != nil {
|
||||||
return err
|
// return err
|
||||||
}
|
//}
|
||||||
if fs.denylist.MatchesPath(sp) {
|
// TODO: update logic to use unixFS
|
||||||
return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: sp})
|
if fs.denylist.MatchesPath(p) {
|
||||||
|
return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: p})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Normalizes a directory being passed in to ensure the user is not able to escape
|
|
||||||
// from their data directory. After normalization if the directory is still within their home
|
|
||||||
// path it is returned. If they managed to "escape" an error will be returned.
|
|
||||||
//
|
|
||||||
// This logic is actually copied over from the SFTP server code. Ideally that eventually
|
|
||||||
// either gets ported into this application, or is able to make use of this package.
|
|
||||||
func (fs *Filesystem) SafePath(p string) (string, error) {
|
|
||||||
// Start with a cleaned up path before checking the more complex bits.
|
|
||||||
r := fs.unsafeFilePath(p)
|
|
||||||
|
|
||||||
// At the same time, evaluate the symlink status and determine where this file or folder
|
|
||||||
// is truly pointing to.
|
|
||||||
ep, err := filepath.EvalSymlinks(r)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
|
|
||||||
} else if os.IsNotExist(err) {
|
|
||||||
// The target of one of the symlinks (EvalSymlinks is recursive) does not exist.
|
|
||||||
// So we get what target path does not exist and check if it's within the data
|
|
||||||
// directory. If it is, we return the original path, otherwise we return an error.
|
|
||||||
pErr, ok := err.(*iofs.PathError)
|
|
||||||
if !ok {
|
|
||||||
return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
|
|
||||||
}
|
|
||||||
ep = pErr.Path
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the requested directory from EvalSymlinks begins with the server root directory go
|
|
||||||
// ahead and return it. If not we'll return an error which will block any further action
|
|
||||||
// on the file.
|
|
||||||
if fs.unsafeIsInDataDirectory(ep) {
|
|
||||||
// Returning the original path here instead of the resolved path ensures that
|
|
||||||
// whatever the user is trying to do will work as expected. If we returned the
|
|
||||||
// resolved path, the user would be unable to know that it is in fact a symlink.
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", NewBadPathResolution(p, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate a path to the file by cleaning it up and appending the root server path to it. This
|
// Generate a path to the file by cleaning it up and appending the root server path to it. This
|
||||||
// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
|
// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
|
||||||
// the fs.unsafeIsInDataDirectory(p) function to confirm.
|
// the fs.unsafeIsInDataDirectory(p) function to confirm.
|
||||||
@ -84,51 +41,3 @@ func (fs *Filesystem) unsafeFilePath(p string) string {
|
|||||||
func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool {
|
func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool {
|
||||||
return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/")
|
return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Executes the fs.SafePath function in parallel against an array of paths. If any of the calls
|
|
||||||
// fails an error will be returned.
|
|
||||||
func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
|
|
||||||
var cleaned []string
|
|
||||||
|
|
||||||
// Simple locker function to avoid racy appends to the array of cleaned paths.
|
|
||||||
m := new(sync.Mutex)
|
|
||||||
push := func(c string) {
|
|
||||||
m.Lock()
|
|
||||||
cleaned = append(cleaned, c)
|
|
||||||
m.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create an error group that we can use to run processes in parallel while retaining
|
|
||||||
// the ability to cancel the entire process immediately should any of it fail.
|
|
||||||
g, ctx := errgroup.WithContext(context.Background())
|
|
||||||
|
|
||||||
// Iterate over all of the paths and generate a cleaned path, if there is an error for any
|
|
||||||
// of the files, abort the process.
|
|
||||||
for _, p := range paths {
|
|
||||||
// Create copy so we can use it within the goroutine correctly.
|
|
||||||
pi := p
|
|
||||||
|
|
||||||
// Recursively call this function to continue digging through the directory tree within
|
|
||||||
// a separate goroutine. If the context is canceled abort this process.
|
|
||||||
g.Go(func() error {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
default:
|
|
||||||
// If the callback returns true, go ahead and keep walking deeper. This allows
|
|
||||||
// us to programmatically continue deeper into directories, or stop digging
|
|
||||||
// if that pathway knows it needs nothing else.
|
|
||||||
if c, err := fs.SafePath(pi); err != nil {
|
|
||||||
return err
|
|
||||||
} else {
|
|
||||||
push(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Block until all of the routines finish and have returned a value.
|
|
||||||
return cleaned, g.Wait()
|
|
||||||
}
|
|
||||||
|
|||||||
@ -8,6 +8,8 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFilesystem_Path(t *testing.T) {
|
func TestFilesystem_Path(t *testing.T) {
|
||||||
@ -21,80 +23,6 @@ func TestFilesystem_Path(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilesystem_SafePath(t *testing.T) {
|
|
||||||
g := Goblin(t)
|
|
||||||
fs, rfs := NewFs()
|
|
||||||
prefix := filepath.Join(rfs.root, "/server")
|
|
||||||
|
|
||||||
g.Describe("SafePath", func() {
|
|
||||||
g.It("returns a cleaned path to a given file", func() {
|
|
||||||
p, err := fs.SafePath("test.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/test.txt")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("/test.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/test.txt")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("./test.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/test.txt")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("/foo/../test.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/test.txt")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("/foo/bar")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/foo/bar")
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("handles root directory access", func() {
|
|
||||||
p, err := fs.SafePath("/")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix)
|
|
||||||
|
|
||||||
p, err = fs.SafePath("")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix)
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("removes trailing slashes from paths", func() {
|
|
||||||
p, err := fs.SafePath("/foo/bar/")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/foo/bar")
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("handles deeply nested directories that do not exist", func() {
|
|
||||||
p, err := fs.SafePath("/foo/bar/baz/quaz/../../ducks/testing.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/foo/bar/ducks/testing.txt")
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("blocks access to files outside the root directory", func() {
|
|
||||||
p, err := fs.SafePath("../test.txt")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
g.Assert(p).Equal("")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("/../test.txt")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
g.Assert(p).Equal("")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("./foo/../../test.txt")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
g.Assert(p).Equal("")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("..")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
g.Assert(p).Equal("")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// We test against accessing files outside the root directory in the tests, however it
|
// We test against accessing files outside the root directory in the tests, however it
|
||||||
// is still possible for someone to mess up and not properly use this safe path call. In
|
// is still possible for someone to mess up and not properly use this safe path call. In
|
||||||
// order to truly confirm this, we'll try to pass in a symlinked malicious file to all of
|
// order to truly confirm this, we'll try to pass in a symlinked malicious file to all of
|
||||||
@ -133,7 +61,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
|||||||
|
|
||||||
err := fs.Writefile("symlinked.txt", r)
|
err := fs.Writefile("symlinked.txt", r)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot write to a non-existent file symlinked outside the root", func() {
|
g.It("cannot write to a non-existent file symlinked outside the root", func() {
|
||||||
@ -141,7 +69,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
|||||||
|
|
||||||
err := fs.Writefile("symlinked_does_not_exist.txt", r)
|
err := fs.Writefile("symlinked_does_not_exist.txt", r)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot write to chained symlinks with target that does not exist outside the root", func() {
|
g.It("cannot write to chained symlinks with target that does not exist outside the root", func() {
|
||||||
@ -149,7 +77,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
|||||||
|
|
||||||
err := fs.Writefile("symlinked_does_not_exist2.txt", r)
|
err := fs.Writefile("symlinked_does_not_exist2.txt", r)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot write a file to a directory symlinked outside the root", func() {
|
g.It("cannot write a file to a directory symlinked outside the root", func() {
|
||||||
@ -157,7 +85,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
|||||||
|
|
||||||
err := fs.Writefile("external_dir/foo.txt", r)
|
err := fs.Writefile("external_dir/foo.txt", r)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -165,55 +93,54 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
|||||||
g.It("cannot create a directory outside the root", func() {
|
g.It("cannot create a directory outside the root", func() {
|
||||||
err := fs.CreateDirectory("my_dir", "external_dir")
|
err := fs.CreateDirectory("my_dir", "external_dir")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot create a nested directory outside the root", func() {
|
g.It("cannot create a nested directory outside the root", func() {
|
||||||
err := fs.CreateDirectory("my/nested/dir", "external_dir/foo/bar")
|
err := fs.CreateDirectory("my/nested/dir", "external_dir/foo/bar")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot create a nested directory outside the root", func() {
|
g.It("cannot create a nested directory outside the root", func() {
|
||||||
err := fs.CreateDirectory("my/nested/dir", "external_dir/server")
|
err := fs.CreateDirectory("my/nested/dir", "external_dir/server")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
g.Describe("Rename", func() {
|
g.Describe("Rename", func() {
|
||||||
g.It("cannot rename a file symlinked outside the directory root", func() {
|
g.It("can rename a file symlinked outside the directory root", func() {
|
||||||
err := fs.Rename("symlinked.txt", "foo.txt")
|
_, err := os.Lstat(filepath.Join(rfs.root, "server", "symlinked.txt"))
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
err = fs.Rename("symlinked.txt", "foo.txt")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
_, err = os.Lstat(filepath.Join(rfs.root, "server", "foo.txt"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot rename a symlinked directory outside the root", func() {
|
g.It("can rename a symlinked directory outside the root", func() {
|
||||||
err := fs.Rename("external_dir", "foo")
|
_, err := os.Lstat(filepath.Join(rfs.root, "server", "external_dir"))
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
err = fs.Rename("external_dir", "foo")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
_, err = os.Lstat(filepath.Join(rfs.root, "server", "foo"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot rename a file to a location outside the directory root", func() {
|
g.It("cannot rename a file to a location outside the directory root", func() {
|
||||||
rfs.CreateServerFileFromString("my_file.txt", "internal content")
|
_ = rfs.CreateServerFileFromString("my_file.txt", "internal content")
|
||||||
|
t.Log(rfs.root)
|
||||||
|
|
||||||
err := fs.Rename("my_file.txt", "external_dir/my_file.txt")
|
st, err := os.Lstat(filepath.Join(rfs.root, "server", "foo"))
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(st.Mode()&ufs.ModeSymlink != 0).IsTrue()
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
g.Describe("Chown", func() {
|
err = fs.Rename("my_file.txt", "foo/my_file.txt")
|
||||||
g.It("cannot chown a file symlinked outside the directory root", func() {
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue()
|
||||||
err := fs.Chown("symlinked.txt")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("cannot chown a directory symlinked outside the directory root", func() {
|
st, err = os.Lstat(filepath.Join(rfs.root, "malicious_dir", "my_file.txt"))
|
||||||
err := fs.Chown("external_dir")
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue()
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -221,7 +148,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
|||||||
g.It("cannot copy a file symlinked outside the directory root", func() {
|
g.It("cannot copy a file symlinked outside the directory root", func() {
|
||||||
err := fs.Copy("symlinked.txt")
|
err := fs.Copy("symlinked.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -235,9 +162,9 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
|||||||
|
|
||||||
_, err = rfs.StatServerFile("symlinked.txt")
|
_, err = rfs.StatServerFile("symlinked.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,16 +1,18 @@
|
|||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"encoding/json"
|
||||||
|
"io"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/gabriel-vasile/mimetype"
|
"github.com/gabriel-vasile/mimetype"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Stat struct {
|
type Stat struct {
|
||||||
os.FileInfo
|
ufs.FileInfo
|
||||||
Mimetype string
|
Mimetype string
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -31,40 +33,31 @@ func (s *Stat) MarshalJSON() ([]byte, error) {
|
|||||||
Created: s.CTime().Format(time.RFC3339),
|
Created: s.CTime().Format(time.RFC3339),
|
||||||
Modified: s.ModTime().Format(time.RFC3339),
|
Modified: s.ModTime().Format(time.RFC3339),
|
||||||
Mode: s.Mode().String(),
|
Mode: s.Mode().String(),
|
||||||
// Using `&os.ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
|
// Using `&ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
|
||||||
ModeBits: strconv.FormatUint(uint64(s.Mode()&os.ModePerm), 8),
|
ModeBits: strconv.FormatUint(uint64(s.Mode()&ufs.ModePerm), 8),
|
||||||
Size: s.Size(),
|
Size: s.Size(),
|
||||||
Directory: s.IsDir(),
|
Directory: s.IsDir(),
|
||||||
File: !s.IsDir(),
|
File: !s.IsDir(),
|
||||||
Symlink: s.Mode().Perm()&os.ModeSymlink != 0,
|
Symlink: s.Mode().Type()&ufs.ModeSymlink != 0,
|
||||||
Mime: s.Mimetype,
|
Mime: s.Mimetype,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stat stats a file or folder and returns the base stat object from go along
|
func statFromFile(f ufs.File) (Stat, error) {
|
||||||
// with the MIME data that can be used for editing files.
|
s, err := f.Stat()
|
||||||
func (fs *Filesystem) Stat(p string) (Stat, error) {
|
|
||||||
cleaned, err := fs.SafePath(p)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Stat{}, err
|
return Stat{}, err
|
||||||
}
|
}
|
||||||
return fs.unsafeStat(cleaned)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *Filesystem) unsafeStat(p string) (Stat, error) {
|
|
||||||
s, err := os.Stat(p)
|
|
||||||
if err != nil {
|
|
||||||
return Stat{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var m *mimetype.MIME
|
var m *mimetype.MIME
|
||||||
if !s.IsDir() {
|
if !s.IsDir() {
|
||||||
m, err = mimetype.DetectFile(p)
|
m, err = mimetype.DetectReader(f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Stat{}, err
|
return Stat{}, err
|
||||||
}
|
}
|
||||||
|
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return Stat{}, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
st := Stat{
|
st := Stat{
|
||||||
FileInfo: s,
|
FileInfo: s,
|
||||||
Mimetype: "inode/directory",
|
Mimetype: "inode/directory",
|
||||||
@ -72,6 +65,20 @@ func (fs *Filesystem) unsafeStat(p string) (Stat, error) {
|
|||||||
if m != nil {
|
if m != nil {
|
||||||
st.Mimetype = m.String()
|
st.Mimetype = m.String()
|
||||||
}
|
}
|
||||||
|
return st, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat stats a file or folder and returns the base stat object from go along
|
||||||
|
// with the MIME data that can be used for editing files.
|
||||||
|
func (fs *Filesystem) Stat(p string) (Stat, error) {
|
||||||
|
f, err := fs.unixFS.Open(p)
|
||||||
|
if err != nil {
|
||||||
|
return Stat{}, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
st, err := statFromFile(f)
|
||||||
|
if err != nil {
|
||||||
|
return Stat{}, err
|
||||||
|
}
|
||||||
return st, nil
|
return st, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,13 +0,0 @@
|
|||||||
package filesystem
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CTime returns the time that the file/folder was created.
|
|
||||||
func (s *Stat) CTime() time.Time {
|
|
||||||
st := s.Sys().(*syscall.Stat_t)
|
|
||||||
|
|
||||||
return time.Unix(st.Ctimespec.Sec, st.Ctimespec.Nsec)
|
|
||||||
}
|
|
||||||
@ -3,12 +3,22 @@ package filesystem
|
|||||||
import (
|
import (
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Returns the time that the file/folder was created.
|
// CTime returns the time that the file/folder was created.
|
||||||
|
//
|
||||||
|
// TODO: remove. Ctim is not actually ever been correct and doesn't actually
|
||||||
|
// return the creation time.
|
||||||
func (s *Stat) CTime() time.Time {
|
func (s *Stat) CTime() time.Time {
|
||||||
st := s.Sys().(*syscall.Stat_t)
|
if st, ok := s.Sys().(*unix.Stat_t); ok {
|
||||||
|
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
|
||||||
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
|
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
|
||||||
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
|
}
|
||||||
|
if st, ok := s.Sys().(*syscall.Stat_t); ok {
|
||||||
|
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
|
||||||
|
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
|
||||||
|
}
|
||||||
|
return time.Time{}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,12 +0,0 @@
|
|||||||
package filesystem
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// On linux systems this will return the time that the file was created.
|
|
||||||
// However, I have no idea how to do this on windows, so we're skipping it
|
|
||||||
// for right now.
|
|
||||||
func (s *Stat) CTime() time.Time {
|
|
||||||
return s.ModTime()
|
|
||||||
}
|
|
||||||
@ -2,7 +2,6 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"html/template"
|
"html/template"
|
||||||
"io"
|
"io"
|
||||||
@ -14,8 +13,8 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
|
"github.com/docker/docker/api/types/image"
|
||||||
"github.com/docker/docker/api/types/mount"
|
"github.com/docker/docker/api/types/mount"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
|
|
||||||
@ -162,7 +161,7 @@ func (s *Server) SetRestoring(state bool) {
|
|||||||
|
|
||||||
// RemoveContainer removes the installation container for the server.
|
// RemoveContainer removes the installation container for the server.
|
||||||
func (ip *InstallationProcess) RemoveContainer() error {
|
func (ip *InstallationProcess) RemoveContainer() error {
|
||||||
err := ip.client.ContainerRemove(ip.Server.Context(), ip.Server.ID()+"_installer", types.ContainerRemoveOptions{
|
err := ip.client.ContainerRemove(ip.Server.Context(), ip.Server.ID()+"_installer", container.RemoveOptions{
|
||||||
RemoveVolumes: true,
|
RemoveVolumes: true,
|
||||||
Force: true,
|
Force: true,
|
||||||
})
|
})
|
||||||
@ -218,30 +217,18 @@ func (ip *InstallationProcess) tempDir() string {
|
|||||||
// can be properly mounted into the installation container and then executed.
|
// can be properly mounted into the installation container and then executed.
|
||||||
func (ip *InstallationProcess) writeScriptToDisk() error {
|
func (ip *InstallationProcess) writeScriptToDisk() error {
|
||||||
// Make sure the temp directory root exists before trying to make a directory within it. The
|
// Make sure the temp directory root exists before trying to make a directory within it. The
|
||||||
// ioutil.TempDir call expects this base to exist, it won't create it for you.
|
// os.TempDir call expects this base to exist, it won't create it for you.
|
||||||
if err := os.MkdirAll(ip.tempDir(), 0o700); err != nil {
|
if err := os.MkdirAll(ip.tempDir(), 0o700); err != nil {
|
||||||
return errors.WithMessage(err, "could not create temporary directory for install process")
|
return errors.WithMessage(err, "could not create temporary directory for install process")
|
||||||
}
|
}
|
||||||
|
|
||||||
f, err := os.OpenFile(filepath.Join(ip.tempDir(), "install.sh"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
|
f, err := os.OpenFile(filepath.Join(ip.tempDir(), "install.sh"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithMessage(err, "failed to write server installation script to disk before mount")
|
return errors.WithMessage(err, "failed to write server installation script to disk before mount")
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
if _, err := io.Copy(f, strings.NewReader(strings.ReplaceAll(ip.Script.Script, "\r\n", "\n"))); err != nil {
|
||||||
w := bufio.NewWriter(f)
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(bytes.NewReader([]byte(ip.Script.Script)))
|
|
||||||
for scanner.Scan() {
|
|
||||||
w.WriteString(scanner.Text() + "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Flush()
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -260,7 +247,7 @@ func (ip *InstallationProcess) pullInstallationImage() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get the ImagePullOptions.
|
// Get the ImagePullOptions.
|
||||||
imagePullOptions := types.ImagePullOptions{All: false}
|
imagePullOptions := image.PullOptions{All: false}
|
||||||
if registryAuth != nil {
|
if registryAuth != nil {
|
||||||
b64, err := registryAuth.Base64()
|
b64, err := registryAuth.Base64()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -273,7 +260,7 @@ func (ip *InstallationProcess) pullInstallationImage() error {
|
|||||||
|
|
||||||
r, err := ip.client.ImagePull(ip.Server.Context(), ip.Script.ContainerImage, imagePullOptions)
|
r, err := ip.client.ImagePull(ip.Server.Context(), ip.Script.ContainerImage, imagePullOptions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
images, ierr := ip.client.ImageList(ip.Server.Context(), types.ImageListOptions{})
|
images, ierr := ip.client.ImageList(ip.Server.Context(), image.ListOptions{})
|
||||||
if ierr != nil {
|
if ierr != nil {
|
||||||
// Well damn, something has gone really wrong here, just go ahead and abort there
|
// Well damn, something has gone really wrong here, just go ahead and abort there
|
||||||
// isn't much anything we can do to try and self-recover from this.
|
// isn't much anything we can do to try and self-recover from this.
|
||||||
@ -345,7 +332,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
|
|||||||
defer ip.RemoveContainer()
|
defer ip.RemoveContainer()
|
||||||
|
|
||||||
ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server")
|
ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server")
|
||||||
reader, err := ip.client.ContainerLogs(ip.Server.Context(), containerId, types.ContainerLogsOptions{
|
reader, err := ip.client.ContainerLogs(ip.Server.Context(), containerId, container.LogsOptions{
|
||||||
ShowStdout: true,
|
ShowStdout: true,
|
||||||
ShowStderr: true,
|
ShowStderr: true,
|
||||||
Follow: false,
|
Follow: false,
|
||||||
@ -476,7 +463,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ip.Server.Log().WithField("container_id", r.ID).Info("running installation script for server in container")
|
ip.Server.Log().WithField("container_id", r.ID).Info("running installation script for server in container")
|
||||||
if err := ip.client.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}); err != nil {
|
if err := ip.client.ContainerStart(ctx, r.ID, container.StartOptions{}); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -511,7 +498,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
|
|||||||
// the server configuration directory, as well as to a websocket listener so
|
// the server configuration directory, as well as to a websocket listener so
|
||||||
// that the process can be viewed in the panel by administrators.
|
// that the process can be viewed in the panel by administrators.
|
||||||
func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
|
func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
|
||||||
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: true}
|
opts := container.LogsOptions{ShowStdout: true, ShowStderr: true, Follow: true}
|
||||||
reader, err := ip.client.ContainerLogs(ctx, id, opts)
|
reader, err := ip.client.ContainerLogs(ctx, id, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@ -2,6 +2,7 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
@ -13,7 +14,6 @@ import (
|
|||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/gammazero/workerpool"
|
"github.com/gammazero/workerpool"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
@ -196,7 +196,10 @@ func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server,
|
|||||||
return nil, errors.WithStackIf(err)
|
return nil, errors.WithStackIf(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
|
s.fs, err = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
|
||||||
// Right now we only support a Docker based environment, so I'm going to hard code
|
// Right now we only support a Docker based environment, so I'm going to hard code
|
||||||
// this logic in. When we're ready to support other environment we'll need to make
|
// this logic in. When we're ready to support other environment we'll need to make
|
||||||
|
|||||||
@ -29,6 +29,21 @@ func (s *Server) Mounts() []environment.Mount {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Handle mounting a generated `/etc/passwd` if the feature is enabled.
|
||||||
|
if passwd := config.Get().System.Passwd; passwd.Enable {
|
||||||
|
s.Log().WithFields(log.Fields{"source_path": passwd.Directory}).Info("mouting generated /etc/{group,passwd} to workaround UID/GID issues")
|
||||||
|
m = append(m, environment.Mount{
|
||||||
|
Source: filepath.Join(passwd.Directory, "group"),
|
||||||
|
Target: "/etc/group",
|
||||||
|
ReadOnly: true,
|
||||||
|
})
|
||||||
|
m = append(m, environment.Mount{
|
||||||
|
Source: filepath.Join(passwd.Directory, "passwd"),
|
||||||
|
Target: "/etc/passwd",
|
||||||
|
ReadOnly: true,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Also include any of this server's custom mounts when returning them.
|
// Also include any of this server's custom mounts when returning them.
|
||||||
return append(m, s.customMounts()...)
|
return append(m, s.customMounts()...)
|
||||||
}
|
}
|
||||||
@ -56,14 +71,12 @@ func (s *Server) customMounts() []environment.Mount {
|
|||||||
if !strings.HasPrefix(source, filepath.Clean(allowed)) {
|
if !strings.HasPrefix(source, filepath.Clean(allowed)) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
mounted = true
|
mounted = true
|
||||||
mounts = append(mounts, environment.Mount{
|
mounts = append(mounts, environment.Mount{
|
||||||
Source: source,
|
Source: source,
|
||||||
Target: target,
|
Target: target,
|
||||||
ReadOnly: m.ReadOnly,
|
ReadOnly: m.ReadOnly,
|
||||||
})
|
})
|
||||||
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -3,7 +3,6 @@ package server
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
@ -161,7 +160,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
|
|||||||
|
|
||||||
return s.Environment.Start(s.Context())
|
return s.Environment.Start(s.Context())
|
||||||
case PowerActionTerminate:
|
case PowerActionTerminate:
|
||||||
return s.Environment.Terminate(s.Context(), os.Kill)
|
return s.Environment.Terminate(s.Context(), "SIGKILL")
|
||||||
}
|
}
|
||||||
|
|
||||||
return errors.New("attempting to handle unknown power action")
|
return errors.New("attempting to handle unknown power action")
|
||||||
|
|||||||
@ -2,6 +2,7 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
@ -11,7 +12,6 @@ import (
|
|||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/creasty/defaults"
|
"github.com/creasty/defaults"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
|
|||||||
@ -35,8 +35,8 @@ type Archive struct {
|
|||||||
func NewArchive(t *Transfer, size uint64) *Archive {
|
func NewArchive(t *Transfer, size uint64) *Archive {
|
||||||
return &Archive{
|
return &Archive{
|
||||||
archive: &filesystem.Archive{
|
archive: &filesystem.Archive{
|
||||||
BasePath: t.Server.Filesystem().Path(),
|
Filesystem: t.Server.Filesystem(),
|
||||||
Progress: progress.NewProgress(size),
|
Progress: progress.NewProgress(size),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -28,6 +28,7 @@ func (s *Server) SyncWithEnvironment() {
|
|||||||
Mounts: s.Mounts(),
|
Mounts: s.Mounts(),
|
||||||
Allocations: cfg.Allocations,
|
Allocations: cfg.Allocations,
|
||||||
Limits: cfg.Build,
|
Limits: cfg.Build,
|
||||||
|
Labels: cfg.Labels,
|
||||||
})
|
})
|
||||||
|
|
||||||
// For Docker specific environments we also want to update the configured image
|
// For Docker specific environments we also want to update the configured image
|
||||||
|
|||||||
@ -2,7 +2,6 @@ package sftp
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
@ -122,7 +121,7 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
|
|||||||
if !h.can(permission) {
|
if !h.can(permission) {
|
||||||
return nil, sftp.ErrSSHFxPermissionDenied
|
return nil, sftp.ErrSSHFxPermissionDenied
|
||||||
}
|
}
|
||||||
f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
|
f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_TRUNC)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system")
|
l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system")
|
||||||
return nil, sftp.ErrSSHFxFailure
|
return nil, sftp.ErrSSHFxFailure
|
||||||
@ -220,16 +219,8 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
|
|||||||
if !h.can(PermissionFileCreate) {
|
if !h.can(PermissionFileCreate) {
|
||||||
return sftp.ErrSSHFxPermissionDenied
|
return sftp.ErrSSHFxPermissionDenied
|
||||||
}
|
}
|
||||||
source, err := h.fs.SafePath(request.Filepath)
|
if err := h.fs.Symlink(request.Filepath, request.Target); err != nil {
|
||||||
if err != nil {
|
l.WithField("target", request.Target).WithField("error", err).Error("failed to create symlink")
|
||||||
return sftp.ErrSSHFxNoSuchFile
|
|
||||||
}
|
|
||||||
target, err := h.fs.SafePath(request.Target)
|
|
||||||
if err != nil {
|
|
||||||
return sftp.ErrSSHFxNoSuchFile
|
|
||||||
}
|
|
||||||
if err := os.Symlink(source, target); err != nil {
|
|
||||||
l.WithField("target", target).WithField("error", err).Error("failed to create symlink")
|
|
||||||
return sftp.ErrSSHFxFailure
|
return sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
@ -274,16 +265,12 @@ func (h *Handler) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
|
|||||||
|
|
||||||
switch request.Method {
|
switch request.Method {
|
||||||
case "List":
|
case "List":
|
||||||
p, err := h.fs.SafePath(request.Filepath)
|
entries, err := h.fs.ReadDirStat(request.Filepath)
|
||||||
if err != nil {
|
|
||||||
return nil, sftp.ErrSSHFxNoSuchFile
|
|
||||||
}
|
|
||||||
files, err := ioutil.ReadDir(p)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error while listing directory")
|
h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error while listing directory")
|
||||||
return nil, sftp.ErrSSHFxFailure
|
return nil, sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
return ListerAt(files), nil
|
return ListerAt(entries), nil
|
||||||
case "Stat":
|
case "Stat":
|
||||||
st, err := h.fs.Stat(request.Filepath)
|
st, err := h.fs.Stat(request.Filepath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@ -107,7 +107,7 @@ func (c *SFTPServer) Run() error {
|
|||||||
go func(conn net.Conn) {
|
go func(conn net.Conn) {
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
if err := c.AcceptInbound(conn, conf); err != nil {
|
if err := c.AcceptInbound(conn, conf); err != nil {
|
||||||
log.WithField("error", err).Error("sftp: failed to accept inbound connection")
|
log.WithField("error", err).WithField("ip", conn.RemoteAddr().String()).Error("sftp: failed to accept inbound connection")
|
||||||
}
|
}
|
||||||
}(conn)
|
}(conn)
|
||||||
}
|
}
|
||||||
|
|||||||
@ -31,8 +31,8 @@ func NewSinkPool() *SinkPool {
|
|||||||
// On adds a channel to the sink pool instance.
|
// On adds a channel to the sink pool instance.
|
||||||
func (p *SinkPool) On(c chan []byte) {
|
func (p *SinkPool) On(c chan []byte) {
|
||||||
p.mu.Lock()
|
p.mu.Lock()
|
||||||
|
defer p.mu.Unlock()
|
||||||
p.sinks = append(p.sinks, c)
|
p.sinks = append(p.sinks, c)
|
||||||
p.mu.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Off removes a given channel from the sink pool. If no matching sink is found
|
// Off removes a given channel from the sink pool. If no matching sink is found
|
||||||
@ -69,13 +69,11 @@ func (p *SinkPool) Off(c chan []byte) {
|
|||||||
func (p *SinkPool) Destroy() {
|
func (p *SinkPool) Destroy() {
|
||||||
p.mu.Lock()
|
p.mu.Lock()
|
||||||
defer p.mu.Unlock()
|
defer p.mu.Unlock()
|
||||||
|
|
||||||
for _, c := range p.sinks {
|
for _, c := range p.sinks {
|
||||||
if c != nil {
|
if c != nil {
|
||||||
close(c)
|
close(c)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
p.sinks = nil
|
p.sinks = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -98,6 +96,7 @@ func (p *SinkPool) Destroy() {
|
|||||||
func (p *SinkPool) Push(data []byte) {
|
func (p *SinkPool) Push(data []byte) {
|
||||||
p.mu.RLock()
|
p.mu.RLock()
|
||||||
defer p.mu.RUnlock()
|
defer p.mu.RUnlock()
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(len(p.sinks))
|
wg.Add(len(p.sinks))
|
||||||
for _, c := range p.sinks {
|
for _, c := range p.sinks {
|
||||||
@ -105,15 +104,22 @@ func (p *SinkPool) Push(data []byte) {
|
|||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
select {
|
select {
|
||||||
case c <- data:
|
case c <- data:
|
||||||
case <-time.After(time.Millisecond * 10):
|
case <-time.After(10 * time.Millisecond):
|
||||||
// If there is nothing in the channel to read, but we also cannot write
|
// If we cannot send the message to the channel within 10ms,
|
||||||
// to the channel, just skip over sending data. If we don't do this you'll
|
// then try to drop the oldest message from the channel, then
|
||||||
// end up blocking the application on the channel read below.
|
// send our message.
|
||||||
if len(c) == 0 {
|
select {
|
||||||
break
|
case <-c:
|
||||||
|
// Only attempt to send the message if we were able to make
|
||||||
|
// space for it on the channel.
|
||||||
|
select {
|
||||||
|
case c <- data:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// Do nothing, this is a fallthrough if there is nothing to
|
||||||
|
// read from c.
|
||||||
}
|
}
|
||||||
<-c
|
|
||||||
c <- data
|
|
||||||
}
|
}
|
||||||
}(c)
|
}(c)
|
||||||
}
|
}
|
||||||
|
|||||||
@ -2,7 +2,6 @@ package system
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@ -11,20 +10,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func MutexLocked(m *sync.RWMutex) bool {
|
func MutexLocked(m *sync.RWMutex) bool {
|
||||||
v := reflect.ValueOf(m).Elem()
|
unlocked := m.TryLock()
|
||||||
|
if unlocked {
|
||||||
state := v.FieldByName("w").FieldByName("state")
|
m.Unlock()
|
||||||
|
|
||||||
readerCountField := v.FieldByName("readerCount")
|
|
||||||
// go1.20 changed readerCount to an atomic
|
|
||||||
// ref; https://github.com/golang/go/commit/e509452727b469d89a3fc4a7d1cbf9d3f110efee
|
|
||||||
var readerCount int64
|
|
||||||
if readerCountField.Kind() == reflect.Struct {
|
|
||||||
readerCount = readerCountField.FieldByName("v").Int()
|
|
||||||
} else {
|
|
||||||
readerCount = readerCountField.Int()
|
|
||||||
}
|
}
|
||||||
return state.Int()&1 == 1 || readerCount > 0
|
return !unlocked
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSink(t *testing.T) {
|
func TestSink(t *testing.T) {
|
||||||
|
|||||||
@ -6,6 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/acobaugh/osrelease"
|
"github.com/acobaugh/osrelease"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/api/types/system"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
"github.com/docker/docker/pkg/parsers/kernel"
|
"github.com/docker/docker/pkg/parsers/kernel"
|
||||||
)
|
)
|
||||||
@ -121,22 +122,22 @@ func GetSystemInformation() (*Information, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetDockerInfo(ctx context.Context) (types.Version, types.Info, error) {
|
func GetDockerInfo(ctx context.Context) (types.Version, system.Info, error) {
|
||||||
// TODO: find a way to re-use the client from the docker environment.
|
// TODO: find a way to re-use the client from the docker environment.
|
||||||
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, system.Info{}, err
|
||||||
}
|
}
|
||||||
defer c.Close()
|
defer c.Close()
|
||||||
|
|
||||||
dockerVersion, err := c.ServerVersion(ctx)
|
dockerVersion, err := c.ServerVersion(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, system.Info{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
dockerInfo, err := c.Info(ctx)
|
dockerInfo, err := c.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, system.Info{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return dockerVersion, dockerInfo, nil
|
return dockerVersion, dockerInfo, nil
|
||||||
|
|||||||
@ -3,13 +3,13 @@ package system
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@ -1,6 +1,7 @@
|
|||||||
package system
|
package system
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@ -8,7 +9,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_Utils(t *testing.T) {
|
func Test_Utils(t *testing.T) {
|
||||||
|
|||||||
BIN
wings-api.paw
BIN
wings-api.paw
Binary file not shown.
Loading…
x
Reference in New Issue
Block a user