Compare commits

..

1 Commits

Author SHA1 Message Date
Pterodactyl CI
4902051abd bump version for release 2023-02-08 23:33:33 +00:00
94 changed files with 3033 additions and 3225 deletions

View File

@@ -1,21 +0,0 @@
root = true
[*]
indent_style = tab
indent_size = 4
tab_width = 4
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.go]
max_line_length = 100
[*.md]
trim_trailing_whitespace = false
[*.{md,nix,yaml}]
indent_style = space
indent_size = 2
tab_width = 2

8
.envrc
View File

@@ -1,8 +0,0 @@
#!/usr/bin/env sh
# Load the flake's `devShells.${currentSystem}.default`.
if ! use flake .; then
echo 'The development shell was unable to be built.' >&2
echo 'The development environment was not loaded.' >&2
echo 'Please make the necessary changes in flake.nix to fix any issues and hit enter to try again.' >&2
fi

View File

@@ -1 +1 @@
github: [pterodactyl]
github: [matthewpi]

View File

@@ -1,10 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: /
schedule:
interval: monthly
- package-ecosystem: gomod
directory: /
schedule:
interval: weekly

View File

@@ -13,26 +13,30 @@ on:
jobs:
analyze:
name: Analyze
runs-on: ubuntu-24.04
strategy:
fail-fast: false
matrix:
include:
- language: go
build-mode: autobuild
runs-on: ubuntu-20.04
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language:
- go
steps:
- name: Code checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Code Checkout
uses: actions/checkout@v3
- name: Initialize CodeQL
uses: github/codeql-action/init@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
- name: Autobuild
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@0499de31b99561a6d14a36a5f662c2a54f91beee # v3.29.5
uses: github/codeql-action/analyze@v2

View File

@@ -11,21 +11,18 @@ on:
jobs:
build-and-push:
name: Build and Push
runs-on: ubuntu-24.04
runs-on: ubuntu-20.04
# Always run against a tag, even if the commit into the tag has [docker skip] within the commit message.
if: "!contains(github.ref, 'develop') || (!contains(github.event.head_commit.message, 'skip docker') && !contains(github.event.head_commit.message, 'docker skip'))"
permissions:
contents: read
packages: write
steps:
- name: Code checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@v3
- name: Docker metadata
id: docker_meta
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}
images: ghcr.io/pterodactyl/wings
flavor: |
latest=false
tags: |
@@ -34,26 +31,26 @@ jobs:
type=ref,event=branch
- name: Setup QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
uses: docker/setup-qemu-action@v2
- name: Setup Docker buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
username: ${{ github.repository_owner }}
password: ${{ secrets.REGISTRY_TOKEN }}
- name: Get Build Information
id: build_info
run: |
echo "version_tag=${GITHUB_REF/refs\/tags\/v/}" >> "$GITHUB_OUTPUT"
echo "short_sha=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT"
echo "version_tag=${GITHUB_REF/refs\/tags\/v/}" >> $GITHUB_OUTPUT
echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
- name: Build and Push (tag)
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
uses: docker/build-push-action@v3
if: "github.event_name == 'release' && github.event.action == 'published'"
with:
context: .
@@ -66,7 +63,7 @@ jobs:
tags: ${{ steps.docker_meta.outputs.tags }}
- name: Build and Push (develop)
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
uses: docker/build-push-action@v3
if: "github.event_name == 'push' && contains(github.ref, 'develop')"
with:
context: .

View File

@@ -15,21 +15,45 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-24.04]
go: ["1.25.6"]
os: [ubuntu-20.04]
go: ["1.18.10", "1.19.5"]
goos: [linux]
goarch: [amd64, arm64]
permissions:
contents: read
steps:
- name: Code checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
steps:
- name: Setup Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@v3
with:
go-version: ${{ matrix.go }}
- name: Code Checkout
uses: actions/checkout@v3
- name: Gather environment variables
id: env
run: |
printf "Go Executable Path: $(which go)\n"
printf "Go Version: $(go version)\n"
printf "\n\nGo Environment:\n\n"
go env
printf "\n\nSystem Environment:\n\n"
env
printf "Git Version: $(git version)\n\n"
echo "version_tag=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
echo "go_cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
echo "go_mod_cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
- name: Build Cache
uses: actions/cache@v3
with:
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
path: |
${{ steps.env.outputs.go_cache }}
${{ steps.env.outputs.go_mod_cache }}
- name: go mod download
env:
CGO_ENABLED: 0
@@ -43,8 +67,8 @@ jobs:
CGO_ENABLED: 0
SRC_PATH: github.com/pterodactyl/wings
run: |
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GITHUB_SHA:0:7}" -o dist/wings ${SRC_PATH}
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GITHUB_SHA:0:7}" -o dist/wings_debug ${SRC_PATH}
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o dist/wings ${SRC_PATH}
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o dist/wings_debug ${SRC_PATH}
chmod 755 dist/*
- name: go test
@@ -62,15 +86,15 @@ jobs:
go test -race $(go list ./...)
- name: Upload Release Artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') }}
uses: actions/upload-artifact@v3
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
with:
name: wings_linux_${{ matrix.goarch }}
path: dist/wings
- name: Upload Debug Artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') }}
uses: actions/upload-artifact@v3
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
with:
name: wings_linux_${{ matrix.goarch }}_debug
path: dist/wings_debug

View File

@@ -1,56 +1,97 @@
name: Release
on:
push:
tags:
- "v*"
jobs:
release:
name: Release
runs-on: ubuntu-24.04
permissions:
contents: write
runs-on: ubuntu-20.04
steps:
- name: Code checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Code Checkout
uses: actions/checkout@v3
- name: Setup Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
uses: actions/setup-go@v3
with:
go-version: 1.25.6
go-version: "1.18.10"
- name: Build release binaries
env:
CGO_ENABLED: 0
REF: ${{ github.ref }}
run: |
GOARCH=amd64 go build -o dist/wings_linux_amd64 -v -trimpath -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${{ github.ref_name }}" github.com/pterodactyl/wings
GOARCH=amd64 go build -o dist/wings_linux_amd64 -v -trimpath -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${REF:11}" github.com/pterodactyl/wings
chmod 755 dist/wings_linux_amd64
GOARCH=arm64 go build -o dist/wings_linux_arm64 -v -trimpath -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${{ github.ref_name }}" github.com/pterodactyl/wings
GOARCH=arm64 go build -o dist/wings_linux_arm64 -v -trimpath -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${REF:11}" github.com/pterodactyl/wings
chmod 755 dist/wings_linux_arm64
- name: Extract changelog
env:
REF: ${{ github.ref }}
run: |
sed -n "/^## ${REF:10}/,/^## /{/^## /b;p}" CHANGELOG.md > ./RELEASE_CHANGELOG
- name: Create checksum and add to changelog
run: |
SUM=`cd dist && sha256sum wings_linux_amd64`
SUM2=`cd dist && sha256sum wings_linux_arm64`
echo -e "\n#### SHA256 Checksum\n\`\`\`\n$SUM\n$SUM2\n\`\`\`\n" >> ./RELEASE_CHANGELOG
echo -e "$SUM\n$SUM2" > checksums.txt
- name: Create release branch
env:
VERSION: ${{ github.ref_name }}
REF: ${{ github.ref }}
run: |
BRANCH=release/${{ env.VERSION }}
BRANCH=release/${REF:10}
git config --local user.email "ci@pterodactyl.io"
git config --local user.name "Pterodactyl CI"
git checkout -b $BRANCH
git push -u origin $BRANCH
sed -i "s/var Version = \".*\"/var Version = \"${VERSION:1}\"/" system/const.go
sed -i "s/var Version = \".*\"/var Version = \"${REF:11}\"/" system/const.go
git add system/const.go
git commit -m "ci(release): bump version"
git commit -m "bump version for release"
git push
- name: write changelog
run: |
sed -n "/^## ${{ github.ref_name }}/,/^## /{/^## /b;p}" CHANGELOG.md > ./RELEASE_CHANGELOG
- uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2.5.0
- name: Create release
id: create_release
uses: softprops/action-gh-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
draft: true
prerelease: ${{ contains(github.ref_name, 'rc') || contains(github.ref_name, 'beta') || contains(github.ref_name, 'alpha') }}
prerelease: ${{ contains(github.ref, 'rc') || contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
body_path: ./RELEASE_CHANGELOG
files: |
dist/*
- name: Upload amd64 binary
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: dist/wings_linux_amd64
asset_name: wings_linux_amd64
asset_content_type: application/octet-stream
- name: Upload arm64 binary
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: dist/wings_linux_arm64
asset_name: wings_linux_arm64
asset_content_type: application/octet-stream
- name: Upload checksum
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./checksums.txt
asset_name: checksums.txt
asset_content_type: text/plain

View File

@@ -1,77 +1,5 @@
# Changelog
## v1.12.1
### Added
* Add mount for /etc/machine-id for servers for Hytale ([#292](https://github.com/pterodactyl/wings/pull/292))
## v1.12.0
### Fixed
* [CVE-2025-68954](https://github.com/pterodactyl/panel/security/advisories/GHSA-8c39-xppg-479c)
* [CVE-2025-69199](https://github.com/pterodactyl/panel/security/advisories/GHSA-8w7m-w749-rx98)
* [CVE-2026-21696](https://github.com/pterodactyl/wings/security/advisories/GHSA-2497-gp99-2m74)
* Fixes folders not being sorted before files when returning list of directory contents ([#5078](https://github.com/pterodactyl/panel/issues/5078))
* User-defined labels not being passed to environment ([#191](https://github.com/pterodactyl/wings/pull/191))
* Fixes handling of termination signals for containers ([#192](https://github.com/pterodactyl/wings/pull/192))
* Fixes logic to use base2 (1024, *bibyte) when calculating memory limits ([#190](https://github.com/pterodactyl/wings/pull/190))
* Fixes hard-links being counted multiple times when calculating disk usage ([#181](https://github.com/pterodactyl/wings/pull/181))
### Added
* Support relative file paths for the Wings config ([#180](https://github.com/pterodactyl/wings/pull/180))
* Support mounting generated `/etc/passwd` files to containers ([#197](https://github.com/pterodactyl/wings/pull/197))
## v1.11.13
### Fixed
* Auto-configure not working ([#5087](https://github.com/pterodactyl/panel/issues/5087))
* Individual files unable to be decompressed ([#5034](https://github.com/pterodactyl/panel/issues/5034))
## v1.11.12
### Fixed
* Arbitrary File Write/Read ([GHSA-gqmf-jqgv-v8fw](https://github.com/pterodactyl/wings/security/advisories/GHSA-gqmf-jqgv-v8fw))
* Server-side Request Forgery (SSRF) during remote file pull ([GHSA-qq22-jj8x-4wwv](https://github.com/pterodactyl/wings/security/advisories/GHSA-qq22-jj8x-4wwv))
* Invalid `Content-Type` being used with the `wings diagnostics` command ([#186](https://github.com/pterodactyl/wings/pull/186))
## v1.11.11
### Fixed
* Backups missing content when a `.pteroignore` file is used
* Archives originating from a subdirectory not containing any files ([#5030](https://github.com/pterodactyl/panel/issues/5030))
## v1.11.10
### Fixed
* Archives randomly ignoring files and directories ([#5027](https://github.com/pterodactyl/panel/issues/5027))
* Crash when deleting or transferring a server ([#5028](https://github.com/pterodactyl/panel/issues/5028))
## v1.11.9
### Changed
* Release binaries are now built with Go 1.21.8
* Updated Go dependencies
### Fixed
* [CVE-2024-27102](https://www.cve.org/CVERecord?id=CVE-2024-27102)
## v1.11.8
### Changed
* Release binaries are now built with Go 1.20.10 (resolves [CVE-2023-44487](https://www.cve.org/CVERecord?id=CVE-2023-44487))
* Updated Go dependencies
## v1.11.7
### Changed
* Updated Go dependencies (this resolves an issue related to `http: invalid Host header` with Docker)
* Wings is now built with go1.19.11
## v1.11.6
### Fixed
* CVE-2023-32080
## v1.11.5
### Added
* Added a config option to disable Wings config.yml updates from the Panel (https://github.com/pterodactyl/wings/commit/ec6d6d83ea3eb14995c24f001233e85b37ffb87b)
### Changed
* Wings is now built with Go 1.19.7
### Fixed
* Fixed archives containing partially matched file names (https://github.com/pterodactyl/wings/commit/43b3496f0001cec231c80af1f9a9b3417d04e8d4)
## v1.11.4
### Fixed
* CVE-2023-25168
@@ -140,10 +68,6 @@
* Archive progress is now reported correctly.
* Labels for containers can now be set by the Panel.
## v1.7.5
### Fixed
* CVE-2023-32080
## v1.7.4
### Fixed
* CVE-2023-25168

View File

@@ -1,8 +1,8 @@
# Stage 1 (Build)
FROM golang:1.25.6-alpine AS builder
FROM golang:1.18-alpine AS builder
ARG VERSION
RUN apk add --update --no-cache git make mailcap
RUN apk add --update --no-cache git make
WORKDIR /app/
COPY go.mod go.sum /app/
RUN go mod download
@@ -18,11 +18,8 @@ RUN echo "ID=\"distroless\"" > /etc/os-release
# Stage 2 (Final)
FROM gcr.io/distroless/static:latest
COPY --from=builder /etc/os-release /etc/os-release
COPY --from=builder /etc/mime.types /etc/mime.types
COPY --from=builder /app/wings /usr/bin/
CMD [ "/usr/bin/wings", "--config", "/etc/pterodactyl/config.yml" ]
ENTRYPOINT ["/usr/bin/wings"]
CMD ["--config", "/etc/pterodactyl/config.yml"]
EXPOSE 8080 2022
EXPOSE 8080

View File

@@ -15,17 +15,20 @@ dependencies, and allowing users to authenticate with the same credentials they
## Sponsors
I would like to extend my sincere thanks to the following sponsors for helping fund Pterodactyl's development.
[Interested in becoming a sponsor?](https://github.com/sponsors/pterodactyl)
I would like to extend my sincere thanks to the following sponsors for helping fund Pterodactyl's development.
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
| Company | About |
|-----------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
| [**HostEZ**](https://hostez.io) | US & EU Rust & Minecraft Hosting. DDoS Protected bare metal, VPS and colocation with low latency, high uptime and maximum availability. EZ! |
| [**Blueprint**](https://blueprint.zip/?utm_source=pterodactyl&utm_medium=sponsor) | Create and install Pterodactyl addons and themes with the growing Blueprint framework - the package-manager for Pterodactyl. Use multiple modifications at once without worrying about conflicts and make use of the large extension ecosystem. |
| [**indifferent broccoli**](https://indifferentbroccoli.com/) | indifferent broccoli is a game server hosting and rental company. With us, you get top-notch computer power for your gaming sessions. We destroy lag, latency, and complexity--letting you focus on the fun stuff. |
| Company | About |
|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [**WISP**](https://wisp.gg) | Extra features. |
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
| [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa. |
| [**Pterodactyl Market**](https://pterodactylmarket.com/)  | Pterodactyl Market is a one-stop shop for Pterodactyl. In our market, you can find Add-ons, Themes, Eggs, and more for Pterodactyl.                                                                                              |
| [**UltraServers**](https://ultraservers.com/) | Deploy premium games hosting with the click of a button. Manage and swap games with ease and let us take care of the rest. We currently support Minecraft, Rust, ARK, 7 Days to Die, Garys MOD, CS:GO, Satisfactory and others. |
| [**Realms Hosting**](https://realmshosting.com/)          | Want to build your Gaming Empire? Use Realms Hosting today to kick start your game server hosting with outstanding DDOS Protection, 24/7 Support, Cheap Prices and a Custom Control Panel.                                      |
## Documentation

View File

@@ -2,7 +2,6 @@ package cmd
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
@@ -14,6 +13,7 @@ import (
"github.com/AlecAivazis/survey/v2"
"github.com/AlecAivazis/survey/v2/terminal"
"github.com/goccy/go-json"
"github.com/spf13/cobra"
"github.com/pterodactyl/wings/config"
@@ -125,7 +125,7 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
}
fmt.Printf("%+v", req.Header)
fmt.Println(req.URL.String())
fmt.Printf(req.URL.String())
res, err := c.Do(req)
if err != nil {
@@ -155,9 +155,6 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
panic(err)
}
// Manually specify the Panel URL as it won't be decoded from JSON.
cfg.PanelLocation = configureArgs.PanelURL
if err = config.WriteToDisk(cfg); err != nil {
panic(err)
}

View File

@@ -2,7 +2,6 @@ package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
@@ -18,9 +17,9 @@ import (
"github.com/AlecAivazis/survey/v2/terminal"
"github.com/apex/log"
"github.com/docker/docker/api/types"
dockersystem "github.com/docker/docker/api/types/system"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/goccy/go-json"
"github.com/spf13/cobra"
"github.com/pterodactyl/wings/config"
@@ -207,18 +206,18 @@ func diagnosticsCmdRun(*cobra.Command, []string) {
}
}
func getDockerInfo() (types.Version, dockersystem.Info, error) {
func getDockerInfo() (types.Version, types.Info, error) {
client, err := environment.Docker()
if err != nil {
return types.Version{}, dockersystem.Info{}, err
return types.Version{}, types.Info{}, err
}
dockerVersion, err := client.ServerVersion(context.Background())
if err != nil {
return types.Version{}, dockersystem.Info{}, err
return types.Version{}, types.Info{}, err
}
dockerInfo, err := client.Info(context.Background())
if err != nil {
return types.Version{}, dockersystem.Info{}, err
return types.Version{}, types.Info{}, err
}
return dockerVersion, dockerInfo, nil
}
@@ -230,8 +229,8 @@ func uploadToHastebin(hbUrl, content string) (string, error) {
return "", err
}
u.Path = path.Join(u.Path, "documents")
res, err := http.Post(u.String(), "text/plain", r)
if err != nil || res.StatusCode < 200 || res.StatusCode >= 300 {
res, err := http.Post(u.String(), "plain/text", r)
if err != nil || res.StatusCode != 200 {
fmt.Println("Failed to upload report to ", u.String(), err)
return "", err
}

View File

@@ -13,7 +13,7 @@ import (
"path/filepath"
"runtime"
"strconv"
"syscall"
"strings"
"time"
"github.com/NYTimes/logrotate"
@@ -104,7 +104,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
if err := config.ConfigureTimezone(); err != nil {
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
return
}
log.WithField("timezone", config.Get().System.Timezone).Info("configured wings with system timezone")
if err := config.ConfigureDirectories(); err != nil {
@@ -113,11 +112,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
}
if err := config.EnsurePterodactylUser(); err != nil {
log.WithField("error", err).Fatal("failed to create pterodactyl system user")
return
}
if err := config.ConfigurePasswd(); err != nil {
log.WithField("error", err).Fatal("failed to configure container passwd file")
return
}
log.WithFields(log.Fields{
"username": config.Get().System.Username,
@@ -129,10 +123,9 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
return
}
t := config.Get().Token
pclient := remote.New(
config.Get().PanelLocation,
remote.WithCredentials(t.ID, t.Token),
remote.WithCredentials(config.Get().AuthenticationTokenId, config.Get().AuthenticationToken),
remote.WithHttpClient(&http.Client{
Timeout: time.Second * time.Duration(config.Get().RemoteQuery.Timeout),
}),
@@ -140,26 +133,19 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
if err := database.Initialize(); err != nil {
log.WithField("error", err).Fatal("failed to initialize database")
return
}
manager, err := server.NewManager(cmd.Context(), pclient)
if err != nil {
log.WithField("error", err).Fatal("failed to load server configurations")
return
}
if err := environment.ConfigureDocker(cmd.Context()); err != nil {
log.WithField("error", err).Fatal("failed to configure docker environment")
return
}
if err := config.WriteToDisk(config.Get()); err != nil {
if !errors.Is(err, syscall.EROFS) {
log.WithField("error", err).Error("failed to write configuration to disk")
} else {
log.WithField("error", err).Debug("failed to write configuration to disk")
}
log.WithField("error", err).Fatal("failed to write configuration to disk")
}
// Just for some nice log output.
@@ -197,9 +183,9 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
for _, serv := range manager.All() {
s := serv
// For each server ensure the minimal environment is configured for the server.
if err := s.CreateEnvironment(); err != nil {
s.Log().WithField("error", err).Error("could not create base environment for server...")
// For each server we encounter make sure the root data directory exists.
if err := s.EnsureDataDirectoryExists(); err != nil {
s.Log().Error("could not create root data directory for server: not loading server...")
continue
}
@@ -393,14 +379,13 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
// Reads the configuration from the disk and then sets up the global singleton
// with all the configuration values.
func initConfig() {
if !filepath.IsAbs(configPath) {
d, err := filepath.Abs(configPath)
if !strings.HasPrefix(configPath, "/") {
d, err := os.Getwd()
if err != nil {
log2.Fatalf("cmd/root: failed to get path to config file: %s", err)
log2.Fatalf("cmd/root: could not determine directory: %s", err)
}
configPath = d
configPath = path.Clean(path.Join(d, configPath))
}
err := config.FromFile(configPath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
@@ -455,18 +440,18 @@ in all copies or substantial portions of the Software.%s`), system.Version, time
}
func exitWithConfigurationNotice() {
fmt.Printf(colorstring.Color(`
fmt.Print(colorstring.Color(`
[_red_][white][bold]Error: Configuration File Not Found[reset]
Wings was not able to locate your configuration file, and therefore is not
able to complete its boot process. Please ensure you have copied your instance
configuration file into the default location below.
Default Location: %s
Default Location: /etc/pterodactyl/config.yml
[yellow]This is not a bug with this software. Please do not make a bug report
for this issue, it will be closed.[reset]
`), config.DefaultLocation)
`))
os.Exit(1)
}

View File

@@ -1,7 +1,6 @@
package config
import (
"bytes"
"context"
"crypto/tls"
"fmt"
@@ -88,7 +87,7 @@ type ApiConfiguration struct {
// Determines if functionality for allowing remote download of files into server directories
// is enabled on this instance. If set to "true" remote downloads will not be possible for
// servers.
DisableRemoteDownload bool `json:"-" yaml:"disable_remote_download"`
DisableRemoteDownload bool `json:"disable_remote_download" yaml:"disable_remote_download"`
// The maximum size for files uploaded through the Panel in MB.
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
@@ -122,23 +121,23 @@ type RemoteQueryConfiguration struct {
// SystemConfiguration defines basic system configuration settings.
type SystemConfiguration struct {
// The root directory where all of the pterodactyl data is stored at.
RootDirectory string `default:"/var/lib/pterodactyl" json:"-" yaml:"root_directory"`
RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"`
// Directory where logs for server installations and other wings events are logged.
LogDirectory string `default:"/var/log/pterodactyl" json:"-" yaml:"log_directory"`
LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"`
// Directory where the server data is stored at.
Data string `default:"/var/lib/pterodactyl/volumes" json:"-" yaml:"data"`
Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"`
// Directory where server archives for transferring will be stored.
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" json:"-" yaml:"archive_directory"`
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"`
// Directory where local backups will be stored on the machine.
BackupDirectory string `default:"/var/lib/pterodactyl/backups" json:"-" yaml:"backup_directory"`
BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
// TmpDirectory specifies where temporary files for Pterodactyl installation processes
// should be created. This supports environments running docker-in-docker.
TmpDirectory string `default:"/tmp/pterodactyl" json:"-" yaml:"tmp_directory"`
TmpDirectory string `default:"/tmp/pterodactyl" yaml:"tmp_directory"`
// The user that should own all of the server files, and be used for containers.
Username string `default:"pterodactyl" yaml:"username"`
@@ -171,45 +170,6 @@ type SystemConfiguration struct {
Gid int `yaml:"gid"`
} `yaml:"user"`
// Passwd controls the mounting of a generated passwd files into containers started by Wings.
Passwd struct {
// Enable controls whether generated passwd files should be mounted into containers.
//
// By default this option is disabled and Wings will not mount any
// additional passwd files into containers.
Enable bool `yaml:"enabled" default:"false"`
// Directory is the directory on disk where the generated passwd files will be stored.
// This directory may be temporary as it will be re-created whenever Wings is started.
//
// This path **WILL** be both written to by Wings and mounted into containers created by
// Wings. If you are running Wings itself in a container, this path will need to be mounted
// into the Wings container as the exact path on the host, which should match the value
// specified here. If you are using SELinux, you will need to make sure this file has the
// correct SELinux context in order for containers to use it.
Directory string `yaml:"directory" default:"/run/wings/etc"`
} `yaml:"passwd"`
// MachineID controls the mounting of a generated `/etc/machine-id` file into containers started by Wings.
MachineID struct {
// Enable controls whether a generated machine-id file should be mounted
// into containers.
//
// By default this option is enabled and Wings will mount an additional
// machine-id file into containers.
Enable bool `yaml:"enabled" default:"true"`
// Directory is the directory on disk where the generated machine-id files will be stored.
// This directory may be temporary as it will be re-created whenever Wings is started.
//
// This path **WILL** be both written to by Wings and mounted into containers created by
// Wings. If you are running Wings itself in a container, this path will need to be mounted
// into the Wings container as the exact path on the host, which should match the value
// specified here. If you are using SELinux, you will need to make sure this file has the
// correct SELinux context in order for containers to use it.
Directory string `yaml:"directory" default:"/run/wings/machine-id"`
} `yaml:"machine_id"`
// The amount of time in seconds that can elapse before a server's disk space calculation is
// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
@@ -311,14 +271,7 @@ type ConsoleThrottles struct {
Period uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"`
}
type Token struct {
ID string
Token string
}
type Configuration struct {
Token Token `json:"-" yaml:"-"`
// The location from which this configuration instance was instantiated.
path string
@@ -349,7 +302,7 @@ type Configuration struct {
// The location where the panel is running that this daemon should connect to
// to collect data and send events.
PanelLocation string `json:"-" yaml:"remote"`
PanelLocation string `json:"remote" yaml:"remote"`
RemoteQuery RemoteQueryConfiguration `json:"remote_query" yaml:"remote_query"`
// AllowedMounts is a list of allowed host-system mount points.
@@ -366,9 +319,6 @@ type Configuration struct {
// is only required by users running Wings without SSL certificates and using internal IP
// addresses in order to connect. Most users should NOT enable this setting.
AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
// IgnorePanelConfigUpdates causes confiuration updates that are sent by the panel to be ignored.
IgnorePanelConfigUpdates bool `json:"ignore_panel_config_updates" yaml:"ignore_panel_config_updates"`
}
// NewAtPath creates a new struct and set the path where it should be stored.
@@ -391,16 +341,11 @@ func NewAtPath(path string) (*Configuration, error) {
// will be paused until it is complete.
func Set(c *Configuration) {
mu.Lock()
defer mu.Unlock()
token := c.Token.Token
if token == "" {
c.Token.Token = c.AuthenticationToken
token = c.Token.Token
}
if _config == nil || _config.Token.Token != token {
_jwtAlgo = jwt.NewHS256([]byte(token))
if _config == nil || _config.AuthenticationToken != c.AuthenticationToken {
_jwtAlgo = jwt.NewHS256([]byte(c.AuthenticationToken))
}
_config = c
mu.Unlock()
}
// SetDebugViaFlag tracks if the application is running in debug mode because of
@@ -408,9 +353,9 @@ func Set(c *Configuration) {
// change to the disk.
func SetDebugViaFlag(d bool) {
mu.Lock()
defer mu.Unlock()
_config.Debug = d
_debugViaFlag = d
mu.Unlock()
}
// Get returns the global configuration instance. This is a thread-safe operation
@@ -435,8 +380,8 @@ func Get() *Configuration {
// the global configuration.
func Update(callback func(c *Configuration)) {
mu.Lock()
defer mu.Unlock()
callback(_config)
mu.Unlock()
}
// GetJwtAlgorithm returns the in-memory JWT algorithm.
@@ -545,37 +490,6 @@ func EnsurePterodactylUser() error {
return nil
}
// ConfigurePasswd generates required passwd files for use with containers started by Wings.
func ConfigurePasswd() error {
passwd := _config.System.Passwd
if !passwd.Enable {
return nil
}
v := []byte(fmt.Sprintf(
`root:x:0:
container:x:%d:
nogroup:x:65534:`,
_config.System.User.Gid,
))
if err := os.WriteFile(filepath.Join(passwd.Directory, "group"), v, 0o644); err != nil {
return fmt.Errorf("failed to write file to %s/group: %v", passwd.Directory, err)
}
v = []byte(fmt.Sprintf(
`root:x:0:0::/root:/bin/sh
container:x:%d:%d::/home/container:/bin/sh
nobody:x:65534:65534::/var/empty:/bin/sh
`,
_config.System.User.Uid,
_config.System.User.Gid,
))
if err := os.WriteFile(filepath.Join(passwd.Directory, "passwd"), v, 0o644); err != nil {
return fmt.Errorf("failed to write file to %s/passwd: %v", passwd.Directory, err)
}
return nil
}
// FromFile reads the configuration from the provided file and stores it in the
// global singleton for this instance.
func FromFile(path string) error {
@@ -592,26 +506,6 @@ func FromFile(path string) error {
return err
}
c.Token = Token{
ID: os.Getenv("WINGS_TOKEN_ID"),
Token: os.Getenv("WINGS_TOKEN"),
}
if c.Token.ID == "" {
c.Token.ID = c.AuthenticationTokenId
}
if c.Token.Token == "" {
c.Token.Token = c.AuthenticationToken
}
c.Token.ID, err = Expand(c.Token.ID)
if err != nil {
return err
}
c.Token.Token, err = Expand(c.Token.Token)
if err != nil {
return err
}
// Store this configuration in the global state.
Set(c)
return nil
@@ -650,11 +544,6 @@ func ConfigureDirectories() error {
return err
}
log.WithField("path", _config.System.TmpDirectory).Debug("ensuring temporary data directory exists")
if err := os.MkdirAll(_config.System.TmpDirectory, 0o700); err != nil {
return err
}
log.WithField("path", _config.System.ArchiveDirectory).Debug("ensuring archive data directory exists")
if err := os.MkdirAll(_config.System.ArchiveDirectory, 0o700); err != nil {
return err
@@ -665,20 +554,6 @@ func ConfigureDirectories() error {
return err
}
if _config.System.Passwd.Enable {
log.WithField("path", _config.System.Passwd.Directory).Debug("ensuring passwd directory exists")
if err := os.MkdirAll(_config.System.Passwd.Directory, 0o755); err != nil {
return err
}
}
if _config.System.MachineID.Enable {
log.WithField("path", _config.System.MachineID.Directory).Debug("ensuring machine-id directory exists")
if err := os.MkdirAll(_config.System.MachineID.Directory, 0o755); err != nil {
return err
}
}
return nil
}
@@ -793,36 +668,3 @@ func getSystemName() (string, error) {
}
return release["ID"], nil
}
// Expand expands an input string by calling [os.ExpandEnv] to expand all
// environment variables, then checks if the value is prefixed with `file://`
// to support reading the value from a file.
//
// NOTE: the order of expanding environment variables first then checking if
// the value references a file is important. This behaviour allows a user to
// pass a value like `file://${CREDENTIALS_DIRECTORY}/token` to allow us to
// work with credentials loaded by systemd's `LoadCredential` (or `LoadCredentialEncrypted`)
// options without the user needing to assume the path of `CREDENTIALS_DIRECTORY`
// or use a preStart script to read the files for us.
func Expand(v string) (string, error) {
// Expand environment variables within the string.
//
// NOTE: this may cause issues if the string contains `$` and doesn't intend
// on getting expanded, however we are using this for our tokens which are
// all alphanumeric characters only.
v = os.ExpandEnv(v)
// Handle files.
const filePrefix = "file://"
if strings.HasPrefix(v, filePrefix) {
p := v[len(filePrefix):]
b, err := os.ReadFile(p)
if err != nil {
return "", nil
}
v = string(bytes.TrimRight(bytes.TrimRight(b, "\r"), "\n"))
}
return v, nil
}

View File

@@ -2,11 +2,11 @@ package config
import (
"encoding/base64"
"encoding/json"
"sort"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/registry"
"github.com/goccy/go-json"
)
type dockerNetworkInterfaces struct {
@@ -115,7 +115,7 @@ type RegistryConfiguration struct {
// Base64 returns the authentication for a given registry as a base64 encoded
// string value.
func (c RegistryConfiguration) Base64() (string, error) {
b, err := json.Marshal(registry.AuthConfig{
b, err := json.Marshal(types.AuthConfig{
Username: c.Username,
Password: c.Password,
})

View File

@@ -23,7 +23,6 @@ services:
- "/var/log/pterodactyl/:/var/log/pterodactyl/"
- "/tmp/pterodactyl/:/tmp/pterodactyl/"
- "/etc/ssl/certs:/etc/ssl/certs:ro"
- "/run/wings:/run/wings"
# you may need /srv/daemon-data if you are upgrading from an old daemon
#- "/srv/daemon-data/:/srv/daemon-data/"
# Required for ssl if you use let's encrypt. uncomment to use.

View File

@@ -7,6 +7,7 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
@@ -38,7 +39,7 @@ func ConfigureDocker(ctx context.Context) error {
}
nw := config.Get().Docker.Network
resource, err := cli.NetworkInspect(ctx, nw.Name, network.InspectOptions{})
resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
if err != nil {
if !client.IsErrNotFound(err) {
return err
@@ -71,10 +72,9 @@ func ConfigureDocker(ctx context.Context) error {
// Creates a new network on the machine if one does not exist already.
func createDockerNetwork(ctx context.Context, cli *client.Client) error {
nw := config.Get().Docker.Network
enableIPv6 := true
_, err := cli.NetworkCreate(ctx, nw.Name, network.CreateOptions{
_, err := cli.NetworkCreate(ctx, nw.Name, types.NetworkCreate{
Driver: nw.Driver,
EnableIPv6: &enableIPv6,
EnableIPv6: true,
Internal: nw.IsInternal,
IPAM: &network.IPAM{
Config: []network.IPAMConfig{{

View File

@@ -2,7 +2,6 @@ package docker
import (
"context"
"encoding/json"
"io"
"net/http"
"reflect"
@@ -14,6 +13,7 @@ import (
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
)

View File

@@ -12,10 +12,9 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/buger/jsonparser"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/pterodactyl/wings/config"
@@ -50,7 +49,7 @@ func (e *Environment) Attach(ctx context.Context) error {
return nil
}
opts := container.AttachOptions{
opts := types.ContainerAttachOptions{
Stdin: true,
Stdout: true,
Stderr: true,
@@ -104,7 +103,7 @@ func (e *Environment) Attach(ctx context.Context) error {
// container. This allows memory, cpu, and IO limitations to be adjusted on the
// fly for individual instances.
func (e *Environment) InSituUpdate() error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
if _, err := e.ContainerInspect(ctx); err != nil {
@@ -200,15 +199,14 @@ func (e *Environment) Create() error {
networkName := "ip-" + strings.ReplaceAll(strings.ReplaceAll(a.DefaultMapping.Ip, ".", "-"), ":", "-")
networkMode = container.NetworkMode(networkName)
if _, err := e.client.NetworkInspect(ctx, networkName, network.InspectOptions{}); err != nil {
if _, err := e.client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}); err != nil {
if !client.IsErrNotFound(err) {
return err
}
enableIPv6 := false
if _, err := e.client.NetworkCreate(ctx, networkName, network.CreateOptions{
if _, err := e.client.NetworkCreate(ctx, networkName, types.NetworkCreate{
Driver: "bridge",
EnableIPv6: &enableIPv6,
EnableIPv6: false,
Internal: false,
Attachable: false,
Ingress: false,
@@ -272,7 +270,7 @@ func (e *Environment) Destroy() error {
// We set it to stopping than offline to prevent crash detection from being triggered.
e.SetState(environment.ProcessStoppingState)
err := e.client.ContainerRemove(context.Background(), e.Id, container.RemoveOptions{
err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{
RemoveVolumes: true,
RemoveLinks: false,
Force: true,
@@ -318,7 +316,7 @@ func (e *Environment) SendCommand(c string) error {
// is running or not, it will simply try to read the last X bytes of the file
// and return them.
func (e *Environment) Readlog(lines int) ([]string, error) {
r, err := e.client.ContainerLogs(context.Background(), e.Id, container.LogsOptions{
r, err := e.client.ContainerLogs(context.Background(), e.Id, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Tail: strconv.Itoa(lines),
@@ -345,25 +343,25 @@ func (e *Environment) Readlog(lines int) ([]string, error) {
// late, and we don't need to block all the servers from booting just because
// of that. I'd imagine in a lot of cases an outage shouldn't affect users too
// badly. It'll at least keep existing servers working correctly if anything.
func (e *Environment) ensureImageExists(img string) error {
func (e *Environment) ensureImageExists(image string) error {
e.Events().Publish(environment.DockerImagePullStarted, "")
defer e.Events().Publish(environment.DockerImagePullCompleted, "")
// Images prefixed with a ~ are local images that we do not need to try and pull.
if strings.HasPrefix(img, "~") {
if strings.HasPrefix(image, "~") {
return nil
}
// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an
// image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull
// an image. Let me know when I am inevitably wrong here...
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
defer cancel()
// Get a registry auth configuration from the config.
var registryAuth *config.RegistryConfiguration
for registry, c := range config.Get().Docker.Registries {
if !strings.HasPrefix(img, registry) {
if !strings.HasPrefix(image, registry) {
continue
}
@@ -373,7 +371,7 @@ func (e *Environment) ensureImageExists(img string) error {
}
// Get the ImagePullOptions.
imagePullOptions := image.PullOptions{All: false}
imagePullOptions := types.ImagePullOptions{All: false}
if registryAuth != nil {
b64, err := registryAuth.Base64()
if err != nil {
@@ -384,23 +382,23 @@ func (e *Environment) ensureImageExists(img string) error {
imagePullOptions.RegistryAuth = b64
}
out, err := e.client.ImagePull(ctx, img, imagePullOptions)
out, err := e.client.ImagePull(ctx, image, imagePullOptions)
if err != nil {
images, ierr := e.client.ImageList(ctx, image.ListOptions{})
images, ierr := e.client.ImageList(ctx, types.ImageListOptions{})
if ierr != nil {
// Well damn, something has gone really wrong here, just go ahead and abort there
// isn't much anything we can do to try and self-recover from this.
return errors.Wrap(ierr, "environment/docker: failed to list images")
}
for _, img2 := range images {
for _, t := range img2.RepoTags {
if t != img {
for _, img := range images {
for _, t := range img.RepoTags {
if t != image {
continue
}
log.WithFields(log.Fields{
"image": img,
"image": image,
"container_id": e.Id,
"err": err.Error(),
}).Warn("unable to pull requested image from remote source, however the image exists locally")
@@ -411,11 +409,11 @@ func (e *Environment) ensureImageExists(img string) error {
}
}
return errors.Wrapf(err, "environment/docker: failed to pull \"%s\" image for server", img)
return errors.Wrapf(err, "environment/docker: failed to pull \"%s\" image for server", image)
}
defer out.Close()
log.WithField("image", img).Debug("pulling docker image... this could take a bit of time")
log.WithField("image", image).Debug("pulling docker image... this could take a bit of time")
// I'm not sure what the best approach here is, but this will block execution until the image
// is done being pulled, which is what we need.
@@ -433,21 +431,22 @@ func (e *Environment) ensureImageExists(img string) error {
return err
}
log.WithField("image", img).Debug("completed docker image pull")
log.WithField("image", image).Debug("completed docker image pull")
return nil
}
func (e *Environment) convertMounts() []mount.Mount {
mounts := e.Configuration.Mounts()
out := make([]mount.Mount, len(mounts))
for i, m := range mounts {
out[i] = mount.Mount{
var out []mount.Mount
for _, m := range e.Configuration.Mounts() {
out = append(out, mount.Mount{
Type: mount.TypeBind,
Source: m.Source,
Target: m.Target,
ReadOnly: m.ReadOnly,
}
})
}
return out
}

View File

@@ -4,10 +4,12 @@ import (
"context"
"os"
"strings"
"syscall"
"time"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
@@ -25,7 +27,7 @@ import (
// is running does not result in the server becoming un-bootable.
func (e *Environment) OnBeforeStart(ctx context.Context) error {
// Always destroy and re-create the server container to ensure that synced data from the Panel is used.
if err := e.client.ContainerRemove(ctx, e.Id, container.RemoveOptions{RemoveVolumes: true}); err != nil {
if err := e.client.ContainerRemove(ctx, e.Id, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
if !client.IsErrNotFound(err) {
return errors.WrapIf(err, "environment/docker: failed to remove container during pre-boot")
}
@@ -120,7 +122,7 @@ func (e *Environment) Start(ctx context.Context) error {
return errors.WrapIf(err, "environment/docker: failed to attach to container")
}
if err := e.client.ContainerStart(actx, e.Id, container.StartOptions{}); err != nil {
if err := e.client.ContainerStart(actx, e.Id, types.ContainerStartOptions{}); err != nil {
return errors.WrapIf(err, "environment/docker: failed to start container")
}
@@ -141,55 +143,48 @@ func (e *Environment) Stop(ctx context.Context) error {
s := e.meta.Stop
e.mu.RUnlock()
// A native "stop" as the Type field value will just skip over all of this
// logic and end up only executing the container stop command (which may or
// may not work as expected).
if s.Type == "" || s.Type == remote.ProcessStopSignal {
if s.Type == "" {
log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using termination procedure")
}
signal := os.Kill
// Handle a few common cases, otherwise just fall through and just pass along
// the os.Kill signal to the process.
switch strings.ToUpper(s.Value) {
case "SIGABRT":
signal = syscall.SIGABRT
case "SIGINT":
signal = syscall.SIGINT
case "SIGTERM":
signal = syscall.SIGTERM
}
return e.Terminate(ctx, signal)
}
// If the process is already offline don't switch it back to stopping. Just leave it how
// it is and continue through to the stop handling for the process.
if e.st.Load() != environment.ProcessOfflineState {
e.SetState(environment.ProcessStoppingState)
}
// Handle signal based actions
if s.Type == remote.ProcessStopSignal {
log.WithField("signal_value", s.Value).Debug("stopping server using signal")
// Handle some common signals - Default to SIGKILL
signal := "SIGKILL"
switch strings.ToUpper(s.Value) {
case "SIGABRT":
signal = "SIGABRT"
case "SIGINT", "C":
signal = "SIGINT"
case "SIGTERM":
signal = "SIGTERM"
case "SIGKILL":
signal = "SIGKILL"
default:
log.Info("Unrecognised signal requested, defaulting to SIGKILL")
}
return e.SignalContainer(ctx, signal)
}
// Handle command based stops
// Only attempt to send the stop command to the instance if we are actually attached to
// the instance. If we are not for some reason, just send the container stop event.
if e.IsAttached() && s.Type == remote.ProcessStopCommand {
return e.SendCommand(s.Value)
}
if s.Type == "" {
log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using native docker stop")
}
// Fallback to a native docker stop. As we aren't passing a signal to ContainerStop docker will
// attempt to stop the container using the default stop signal, SIGTERM, unless
// another signal was specified in the Dockerfile
// Allow the stop action to run for however long it takes, similar to executing a command
// and using a different logic pathway to wait for the container to stop successfully.
//
// Using a negative timeout here will allow the container to stop gracefully,
// rather than forcefully terminating it. Value is in seconds, but -1 is
// treated as indefinitely.
timeout := -1
if err := e.client.ContainerStop(ctx, e.Id, container.StopOptions{Timeout: &timeout}); err != nil {
// rather than forcefully terminating it, this value MUST be at least 1
// second, otherwise it will be ignored.
timeout := -1 * time.Second
if err := e.client.ContainerStop(ctx, e.Id, &timeout); err != nil {
// If the container does not exist just mark the process as stopped and return without
// an error.
if client.IsErrNotFound(err) {
@@ -229,7 +224,7 @@ func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, t
doTermination := func(s string) error {
e.log().WithField("step", s).WithField("duration", duration).Warn("container stop did not complete in time, terminating process...")
return e.Terminate(ctx, "SIGKILL")
return e.Terminate(ctx, os.Kill)
}
// We pass through the timed context for this stop action so that if one of the
@@ -273,8 +268,8 @@ func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, t
return nil
}
// Sends the specified signal to the container in an attempt to stop it.
func (e *Environment) SignalContainer(ctx context.Context, signal string) error {
// Terminate forcefully terminates the container using the signal provided.
func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
c, err := e.ContainerInspect(ctx)
if err != nil {
// Treat missing containers as an okay error state, means it is obviously
@@ -299,23 +294,10 @@ func (e *Environment) SignalContainer(ctx context.Context, signal string) error
// We set it to stopping than offline to prevent crash detection from being triggered.
e.SetState(environment.ProcessStoppingState)
if err := e.client.ContainerKill(ctx, e.Id, signal); err != nil && !client.IsErrNotFound(err) {
sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
if err := e.client.ContainerKill(ctx, e.Id, sig); err != nil && !client.IsErrNotFound(err) {
return errors.WithStack(err)
}
return nil
}
// Terminate forcefully terminates the container using the signal provided.
// then sets its state to stopped.
func (e *Environment) Terminate(ctx context.Context, signal string) error {
// Send the signal to the container to kill it
if err := e.SignalContainer(ctx, signal); err != nil {
return errors.WithStack(err)
}
// We expect Terminate to instantly kill the container
// so go ahead and mark it as dead and clean up
e.SetState(environment.ProcessOfflineState)
return nil

View File

@@ -2,13 +2,13 @@ package docker
import (
"context"
"encoding/json"
"io"
"math"
"time"
"emperror.dev/errors"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/environment"
)
@@ -57,7 +57,7 @@ func (e *Environment) pollResources(ctx context.Context) error {
case <-ctx.Done():
return ctx.Err()
default:
var v container.StatsResponse
var v types.StatsJSON
if err := dec.Decode(&v); err != nil {
if err != io.EOF && !errors.Is(err, context.Canceled) {
e.log().WithField("error", err).Warn("error while processing Docker stats output for container")
@@ -95,7 +95,7 @@ func (e *Environment) pollResources(ctx context.Context) error {
}
}
// The "docker stats" CLI call does not return the same value as the [container.MemoryStats].Usage
// The "docker stats" CLI call does not return the same value as the types.MemoryStats.Usage
// value which can be rather confusing to people trying to compare panel usage to
// their stats output.
//
@@ -103,7 +103,7 @@ func (e *Environment) pollResources(ctx context.Context) error {
// bothering me about it. It should also reflect a slightly more correct memory value anyways.
//
// @see https://github.com/docker/cli/blob/96e1d1d6/cli/command/container/stats_helpers.go#L227-L249
func calculateDockerMemory(stats container.MemoryStats) uint64 {
func calculateDockerMemory(stats types.MemoryStats) uint64 {
if v, ok := stats.Stats["total_inactive_file"]; ok && v < stats.Usage {
return stats.Usage - v
}
@@ -119,7 +119,7 @@ func calculateDockerMemory(stats container.MemoryStats) uint64 {
// by the defined CPU limits on the container.
//
// @see https://github.com/docker/cli/blob/aa097cf1aa19099da70930460250797c8920b709/cli/command/container/stats_helpers.go#L166
func calculateDockerAbsoluteCpu(pStats container.CPUStats, stats container.CPUStats) float64 {
func calculateDockerAbsoluteCpu(pStats types.CPUStats, stats types.CPUStats) float64 {
// Calculate the change in CPU usage between the current and previous reading.
cpuDelta := float64(stats.CPUUsage.TotalUsage) - float64(pStats.CPUUsage.TotalUsage)

View File

@@ -2,6 +2,7 @@ package environment
import (
"context"
"os"
"time"
"github.com/pterodactyl/wings/events"
@@ -71,7 +72,7 @@ type ProcessEnvironment interface {
// Terminate stops a running server instance using the provided signal. This function
// is a no-op if the server is already stopped.
Terminate(ctx context.Context, signal string) error
Terminate(ctx context.Context, signal os.Signal) error
// Destroys the environment removing any containers that were created (in Docker
// environments at least).

View File

@@ -34,7 +34,7 @@ type Mount struct {
// Limits is the build settings for a given server that impact docker container
// creation and resource limits for a server instance.
type Limits struct {
// The total amount of memory in mebibytes that this server is allowed to
// The total amount of memory in megabytes that this server is allowed to
// use on the host system.
MemoryLimit int64 `json:"memory_limit"`
@@ -79,7 +79,7 @@ func (l Limits) MemoryOverheadMultiplier() float64 {
}
func (l Limits) BoundedMemoryLimit() int64 {
return int64(math.Round(float64(l.MemoryLimit) * l.MemoryOverheadMultiplier() * 1024 * 1024))
return int64(math.Round(float64(l.MemoryLimit) * l.MemoryOverheadMultiplier() * 1_000_000))
}
// ConvertedSwap returns the amount of swap available as a total in bytes. This
@@ -90,7 +90,7 @@ func (l Limits) ConvertedSwap() int64 {
return -1
}
return (l.Swap * 1024 * 1024) + l.BoundedMemoryLimit()
return (l.Swap * 1_000_000) + l.BoundedMemoryLimit()
}
// ProcessLimit returns the process limit for a container. This is currently
@@ -105,7 +105,7 @@ func (l Limits) AsContainerResources() container.Resources {
pids := l.ProcessLimit()
resources := container.Resources{
Memory: l.BoundedMemoryLimit(),
MemoryReservation: l.MemoryLimit * 1024 * 1024,
MemoryReservation: l.MemoryLimit * 1_000_000,
MemorySwap: l.ConvertedSwap(),
BlkioWeight: l.IoWeight,
OomKillDisable: &l.OOMDisabled,

View File

@@ -1,10 +1,10 @@
package events
import (
"encoding/json"
"strings"
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/system"
)

82
flake.lock generated
View File

@@ -1,82 +0,0 @@
{
"nodes": {
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1768135262,
"narHash": "sha256-PVvu7OqHBGWN16zSi6tEmPwwHQ4rLPU9Plvs8/1TUBY=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "80daad04eddbbf5a4d883996a73f3f542fa437ac",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1768127708,
"narHash": "sha256-1Sm77VfZh3mU0F5OqKABNLWxOuDeHIlcFjsXeeiPazs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ffbc9f8cbaacfb331b6017d5a5abb21a492c9a38",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1765674936,
"narHash": "sha256-k00uTP4JNfmejrCLJOwdObYC9jHRrr/5M/a/8L2EIdo=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "2075416fcb47225d9b68ac469a5c4801a9c4dd85",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"root": {
"inputs": {
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs",
"treefmt-nix": "treefmt-nix"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1768158989,
"narHash": "sha256-67vyT1+xClLldnumAzCTBvU0jLZ1YBcf4vANRWP3+Ak=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "e96d59dff5c0d7fddb9d113ba108f03c3ef99eca",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -1,54 +0,0 @@
{
description = "Wings";
inputs = {
flake-parts.url = "github:hercules-ci/flake-parts";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
treefmt-nix = {
url = "github:numtide/treefmt-nix";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = {...} @ inputs:
inputs.flake-parts.lib.mkFlake {inherit inputs;} {
systems = inputs.nixpkgs.lib.systems.flakeExposed;
imports = [
inputs.treefmt-nix.flakeModule
];
perSystem = {system, ...}: let
pkgs = import inputs.nixpkgs {inherit system;};
in {
devShells.default = pkgs.mkShell {
buildInputs = with pkgs; [
go_1_24
gofumpt
golangci-lint
gotools
];
};
treefmt = {
projectRootFile = "flake.nix";
programs = {
alejandra.enable = true;
deadnix.enable = true;
gofumpt = {
enable = true;
extra = true;
};
shellcheck.enable = true;
shfmt = {
enable = true;
indent_size = 0; # 0 causes shfmt to use tabs
};
yamlfmt.enable = true;
};
};
};
};
}

186
go.mod
View File

@@ -1,142 +1,124 @@
module github.com/pterodactyl/wings
go 1.25.6
go 1.18
require (
emperror.dev/errors v0.8.1
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/Jeffail/gabs/v2 v2.7.0
github.com/AlecAivazis/survey/v2 v2.3.6
github.com/Jeffail/gabs/v2 v2.6.1
github.com/NYTimes/logrotate v1.0.0
github.com/acobaugh/osrelease v0.1.0
github.com/apex/log v1.9.0
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
github.com/beevik/etree v1.5.0
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/beevik/etree v1.1.0
github.com/buger/jsonparser v1.1.1
github.com/cenkalti/backoff/v4 v4.3.0
github.com/creasty/defaults v1.8.0
github.com/docker/docker v28.3.3+incompatible
github.com/docker/go-connections v0.5.0
github.com/fatih/color v1.18.0
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf
github.com/gabriel-vasile/mimetype v1.4.8
github.com/cenkalti/backoff/v4 v4.1.3
github.com/creasty/defaults v1.6.0
github.com/docker/docker v20.10.18+incompatible
github.com/docker/go-connections v0.4.0
github.com/fatih/color v1.13.0
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
github.com/gabriel-vasile/mimetype v1.4.1
github.com/gammazero/workerpool v1.1.3
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gin-gonic/gin v1.10.1
github.com/glebarez/sqlite v1.11.0
github.com/go-co-op/gocron v1.37.0
github.com/goccy/go-json v0.10.5
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/iancoleman/strcase v0.3.0
github.com/icza/dyno v0.0.0-20230330125955-09f820a8d9c0
github.com/gin-gonic/gin v1.8.1
github.com/glebarez/sqlite v1.4.8
github.com/go-co-op/gocron v1.17.0
github.com/goccy/go-json v0.9.11
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.5.0
github.com/iancoleman/strcase v0.2.0
github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
github.com/juju/ratelimit v1.0.2
github.com/karrick/godirwalk v1.17.0
github.com/klauspost/pgzip v1.2.6
github.com/magiconair/properties v1.8.9
github.com/mattn/go-colorable v0.1.14
github.com/mholt/archives v0.1.5
github.com/klauspost/compress v1.15.11
github.com/klauspost/pgzip v1.2.5
github.com/magiconair/properties v1.8.6
github.com/mattn/go-colorable v0.1.13
github.com/mholt/archiver/v4 v4.0.0-alpha.7
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/sftp v1.13.10
github.com/pkg/sftp v1.13.5
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
golang.org/x/crypto v0.46.0
golang.org/x/sync v0.19.0
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af
github.com/spf13/cobra v1.5.0
github.com/stretchr/testify v1.8.0
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
gopkg.in/ini.v1 v1.67.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
gorm.io/gorm v1.26.0
gorm.io/gorm v1.23.10
)
require (
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/STARRY-S/zip v0.2.3 // indirect
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/sevenzip v1.6.1 // indirect
github.com/bodgit/windows v1.0.1 // indirect
github.com/bytedance/sonic v1.13.1 // indirect
github.com/bytedance/sonic/loader v0.2.4 // indirect
github.com/cloudwego/base64x v0.1.5 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/gammazero/deque v1.0.0 // indirect
github.com/gin-contrib/sse v1.0.0 // indirect
github.com/glebarez/go-sqlite v1.22.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.25.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gammazero/deque v0.2.0 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/glebarez/go-sqlite v1.19.1 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.11.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/magefile/mage v1.15.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/magefile/mage v1.14.0 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mikelolasagasti/xz v1.0.1 // indirect
github.com/minio/minlz v1.0.1 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/nwaples/rardecode/v2 v2.2.0 // indirect
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sorairolake/lzip-go v0.3.8 // indirect
github.com/spf13/afero v1.15.0 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/ulikunitz/xz v0.5.15 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/arch v0.15.0 // indirect
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/protobuf v1.36.5 // indirect
gotest.tools/v3 v3.0.2 // indirect
modernc.org/libc v1.61.13 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.8.2 // indirect
modernc.org/sqlite v1.36.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/ugorji/go/codec v1.2.7 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.1.12 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/protobuf v1.28.1 // indirect
modernc.org/libc v1.20.0 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect
modernc.org/sqlite v1.19.1 // indirect
)

1292
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -56,7 +56,6 @@ func (ac *activityCron) Run(ctx context.Context) error {
activities = append(activities, v)
}
// Delete any invalid activities
if len(ids) > 0 {
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
if tx.Error != nil {
@@ -72,28 +71,16 @@ func (ac *activityCron) Run(ctx context.Context) error {
return errors.WrapIf(err, "cron: failed to send activity events to Panel")
}
// Add all the successful activities to the list of IDs to delete.
ids = make([]int, len(activities))
for i, v := range activities {
ids[i] = v.ID
}
// SQLite has a limitation of how many parameters we can specify in a single
// query, so we need to delete the activities in chunks of 32,000 instead of
// all at once.
i := 0
idsLen := len(ids)
for i < idsLen {
start := i
end := min(i+32000, idsLen)
batchSize := end - start
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids[start:end]).Delete(&models.Activity{})
if tx.Error != nil {
return errors.WithStack(tx.Error)
}
i += batchSize
// Delete all the activities that were sent to the Panel (or that were invalid).
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
if tx.Error != nil {
return errors.WithStack(tx.Error)
}
return nil
}

View File

@@ -5,7 +5,6 @@ import (
"reflect"
"emperror.dev/errors"
"gorm.io/gorm"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
@@ -84,26 +83,9 @@ func (sc *sftpCron) Run(ctx context.Context) error {
if err := sc.manager.Client().SendActivityLogs(ctx, events.Elements()); err != nil {
return errors.Wrap(err, "failed to send sftp activity logs to Panel")
}
// SQLite has a limitation of how many parameters we can specify in a single
// query, so we need to delete the activities in chunks of 32,000 instead of
// all at once.
i := 0
idsLen := len(events.ids)
var tx *gorm.DB
for i < idsLen {
start := i
end := min(i+32000, idsLen)
batchSize := end - start
tx = database.Instance().WithContext(ctx).Where("id IN ?", events.ids[start:end]).Delete(&models.Activity{})
if tx.Error != nil {
return errors.WithStack(tx.Error)
}
i += batchSize
if tx := database.Instance().Where("id IN ?", events.ids).Delete(&models.Activity{}); tx.Error != nil {
return errors.WithStack(tx.Error)
}
return nil
}

View File

@@ -2,9 +2,9 @@ package models
import (
"database/sql"
"encoding/json"
"emperror.dev/errors"
"github.com/goccy/go-json"
)
type JsonNullString struct {

View File

@@ -1,115 +0,0 @@
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
package internal
import (
"io"
"os"
"sync/atomic"
)
// CountedWriter is a writer that counts the amount of data written to the
// underlying writer.
//
// NOTE(review): Write deliberately does NOT return non-EOF errors to the
// caller; they are stashed on the struct and must be checked via Error()
// after the copy finishes. This breaks the usual io.Writer contract —
// confirm all callers check Error().
type CountedWriter struct {
	file *os.File
	// counter tracks the running total of bytes written; atomic so it can
	// be read via BytesWritten while a copy is still in progress.
	counter atomic.Int64
	// err holds the first error returned by the underlying file, if any.
	err error
}

// NewCountedWriter returns a new countedWriter that counts the amount of bytes
// written to the underlying writer.
func NewCountedWriter(f *os.File) *CountedWriter {
	return &CountedWriter{file: f}
}

// BytesWritten returns the amount of bytes that have been written to the
// underlying writer.
func (w *CountedWriter) BytesWritten() int64 {
	return w.counter.Load()
}

// Error returns the error from the writer if any. If the error is an EOF, nil
// will be returned.
func (w *CountedWriter) Error() error {
	if w.err == io.EOF {
		return nil
	}
	return w.err
}

// Write writes bytes to the underlying writer while tracking the total amount
// of bytes written.
func (w *CountedWriter) Write(p []byte) (int, error) {
	// Once any error has been recorded, refuse further writes with io.EOF.
	if w.err != nil {
		return 0, io.EOF
	}
	// Write is a very simple operation for us to handle.
	n, err := w.file.Write(p)
	w.counter.Add(int64(n))
	w.err = err
	// TODO: is this how we actually want to handle errors with this?
	if err == io.EOF {
		return n, io.EOF
	}
	// Non-EOF errors are swallowed here and only exposed through Error().
	return n, nil
}

// ReadFrom copies from r into the underlying file, adding the number of bytes
// the file reports written to the counter.
//
// NOTE(review): the wrapping CountedReader's own count is discarded; only n
// from file.ReadFrom is counted — confirm the wrapper is actually needed.
func (w *CountedWriter) ReadFrom(r io.Reader) (n int64, err error) {
	cr := NewCountedReader(r)
	n, err = w.file.ReadFrom(cr)
	w.counter.Add(n)
	return
}
// CountedReader is a reader that counts the amount of data read from the
// underlying reader.
type CountedReader struct {
reader io.Reader
counter atomic.Int64
err error
}
var _ io.Reader = (*CountedReader)(nil)
// NewCountedReader returns a new countedReader that counts the amount of bytes
// read from the underlying reader.
func NewCountedReader(r io.Reader) *CountedReader {
return &CountedReader{reader: r}
}
// BytesRead returns the amount of bytes that have been read from the underlying
// reader.
func (r *CountedReader) BytesRead() int64 {
return r.counter.Load()
}
// Error returns the error from the reader if any. If the error is an EOF, nil
// will be returned.
func (r *CountedReader) Error() error {
if r.err == io.EOF {
return nil
}
return r.err
}
// Read reads bytes from the underlying reader while tracking the total amount
// of bytes read.
func (r *CountedReader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, io.EOF
}
n, err := r.reader.Read(p)
r.counter.Add(int64(n))
r.err = err
// TODO: is this how we actually want to handle errors with this?
if err == io.EOF {
return n, io.EOF
}
return n, nil
}

View File

@@ -2,6 +2,8 @@ package parser
import (
"bytes"
"io"
"os"
"regexp"
"strconv"
"strings"
@@ -27,14 +29,24 @@ var configMatchRegex = regexp.MustCompile(`{{\s?config\.([\w.-]+)\s?}}`)
// matching:
//
// <Root>
//
// <Property value="testing"/>
//
// <Property value="testing"/>
// </Root>
//
// noinspection RegExpRedundantEscape
var xmlValueMatchRegex = regexp.MustCompile(`^\[([\w]+)='(.*)'\]$`)
// Gets the []byte representation of a configuration file to be passed through to other
// handler functions. If the file does not currently exist, it will be created.
func readFileBytes(path string) ([]byte, error) {
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
if err != nil {
return nil, err
}
defer file.Close()
return io.ReadAll(file)
}
// Gets the value of a key based on the value type defined.
func (cfr *ConfigurationFileReplacement) getKeyValue(value string) interface{} {
if cfr.ReplaceWith.Type() == jsonparser.Boolean {

View File

@@ -2,10 +2,8 @@ package parser
import (
"bufio"
"bytes"
"encoding/json"
"io"
"os"
"path/filepath"
"strconv"
"strings"
@@ -13,6 +11,7 @@ import (
"github.com/apex/log"
"github.com/beevik/etree"
"github.com/buger/jsonparser"
"github.com/goccy/go-json"
"github.com/icza/dyno"
"github.com/magiconair/properties"
"gopkg.in/ini.v1"
@@ -75,26 +74,6 @@ func (cv *ReplaceValue) String() string {
}
}
// Bytes returns the raw byte form of the replacement value, keyed off the
// underlying JSON token type. String values are unescaped before being
// returned; booleans and numbers are passed through untouched.
func (cv *ReplaceValue) Bytes() []byte {
	switch cv.Type() {
	case jsonparser.String:
		// JSON strings are stored escaped; unescape before handing back.
		var stackbuf [64]byte
		unescaped, err := jsonparser.Unescape(cv.value, stackbuf[:])
		if err != nil {
			panic(errors.Wrap(err, "parser: could not parse value"))
		}
		return unescaped
	case jsonparser.Boolean, jsonparser.Number:
		return cv.value
	case jsonparser.Null:
		return []byte("<nil>")
	default:
		return []byte("<invalid>")
	}
}
type ConfigurationParser string
func (cp ConfigurationParser) String() string {
@@ -161,14 +140,14 @@ func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
iv, err := jsonparser.GetString(data, "if_value")
// We only check keypath here since match & replace_with should be present on all of
// them, however if_value is optional.
if err != nil && !errors.Is(err, jsonparser.KeyPathNotFoundError) {
if err != nil && err != jsonparser.KeyPathNotFoundError {
return err
}
cfr.IfValue = iv
rw, dt, _, err := jsonparser.Get(data, "replace_with")
if err != nil {
if !errors.Is(err, jsonparser.KeyPathNotFoundError) {
if err != jsonparser.KeyPathNotFoundError {
return err
}
@@ -188,12 +167,11 @@ func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
return nil
}
// Parse parses a given configuration file and updates all the values within
// as defined in the API response from the Panel.
func (f *ConfigurationFile) Parse(file *os.File) error {
// log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
// Parses a given configuration file and updates all of the values within as defined
// in the API response from the Panel.
func (f *ConfigurationFile) Parse(path string, internal bool) error {
log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
// What the fuck is going on here?
if mb, err := json.Marshal(config.Get()); err != nil {
return err
} else {
@@ -204,24 +182,56 @@ func (f *ConfigurationFile) Parse(file *os.File) error {
switch f.Parser {
case Properties:
err = f.parsePropertiesFile(file)
err = f.parsePropertiesFile(path)
break
case File:
err = f.parseTextFile(file)
err = f.parseTextFile(path)
break
case Yaml, "yml":
err = f.parseYamlFile(file)
err = f.parseYamlFile(path)
break
case Json:
err = f.parseJsonFile(file)
err = f.parseJsonFile(path)
break
case Ini:
err = f.parseIniFile(file)
err = f.parseIniFile(path)
break
case Xml:
err = f.parseXmlFile(file)
err = f.parseXmlFile(path)
break
}
if errors.Is(err, os.ErrNotExist) {
// File doesn't exist, we tried creating it, and same error is returned? Pretty
// sure this pathway is impossible, but if not, abort here.
if internal {
return nil
}
b := strings.TrimSuffix(path, filepath.Base(path))
if err := os.MkdirAll(b, 0o755); err != nil {
return errors.WithMessage(err, "failed to create base directory for missing configuration file")
} else {
if _, err := os.Create(path); err != nil {
return errors.WithMessage(err, "failed to create missing configuration file")
}
}
return f.Parse(path, true)
}
return err
}
// Parses an xml file.
func (f *ConfigurationFile) parseXmlFile(file *os.File) error {
func (f *ConfigurationFile) parseXmlFile(path string) error {
doc := etree.NewDocument()
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
if err != nil {
return err
}
defer file.Close()
if _, err := doc.ReadFrom(file); err != nil {
return err
}
@@ -281,27 +291,41 @@ func (f *ConfigurationFile) parseXmlFile(file *os.File) error {
}
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
// If you don't truncate the file you'll end up duplicating the data in there (or just appending
// to the end of the file. We don't want to do that.
if err := file.Truncate(0); err != nil {
return err
}
// Move the cursor to the start of the file to avoid weird spacing issues.
file.Seek(0, 0)
// Ensure the XML is indented properly.
doc.Indent(2)
// Write the XML to the file.
if _, err := doc.WriteTo(file); err != nil {
// Truncate the file before attempting to write the changes.
if err := os.Truncate(path, 0); err != nil {
return err
}
return nil
// Write the XML to the file.
_, err = doc.WriteTo(file)
return err
}
// Parses an ini file.
func (f *ConfigurationFile) parseIniFile(file *os.File) error {
// Wrap the file in a NopCloser so the ini package doesn't close the file.
cfg, err := ini.Load(io.NopCloser(file))
func (f *ConfigurationFile) parseIniFile(path string) error {
// Ini package can't handle a non-existent file, so handle that automatically here
// by creating it if not exists. Then, immediately close the file since we will use
// other methods to write the new contents.
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
if err != nil {
return err
}
file.Close()
cfg, err := ini.Load(path)
if err != nil {
return err
}
@@ -364,24 +388,14 @@ func (f *ConfigurationFile) parseIniFile(file *os.File) error {
}
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
if err := file.Truncate(0); err != nil {
return err
}
if _, err := cfg.WriteTo(file); err != nil {
return err
}
return nil
return cfg.SaveTo(path)
}
// Parses a json file updating any matching key/value pairs. If a match is not found, the
// value is set regardless in the file. See the commentary in parseYamlFile for more details
// about what is happening during this process.
func (f *ConfigurationFile) parseJsonFile(file *os.File) error {
b, err := io.ReadAll(file)
func (f *ConfigurationFile) parseJsonFile(path string) error {
b, err := readFileBytes(path)
if err != nil {
return err
}
@@ -391,24 +405,14 @@ func (f *ConfigurationFile) parseJsonFile(file *os.File) error {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
if err := file.Truncate(0); err != nil {
return err
}
// Write the data to the file.
if _, err := io.Copy(file, bytes.NewReader(data.BytesIndent("", " "))); err != nil {
return errors.Wrap(err, "parser: failed to write properties file to disk")
}
return nil
output := []byte(data.StringIndent("", " "))
return os.WriteFile(path, output, 0o644)
}
// Parses a yaml file and updates any matching key/value pairs before persisting
// it back to the disk.
func (f *ConfigurationFile) parseYamlFile(file *os.File) error {
b, err := io.ReadAll(file)
func (f *ConfigurationFile) parseYamlFile(path string) error {
b, err := readFileBytes(path)
if err != nil {
return err
}
@@ -439,56 +443,35 @@ func (f *ConfigurationFile) parseYamlFile(file *os.File) error {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
if err := file.Truncate(0); err != nil {
return err
}
// Write the data to the file.
if _, err := io.Copy(file, bytes.NewReader(marshaled)); err != nil {
return errors.Wrap(err, "parser: failed to write properties file to disk")
}
return nil
return os.WriteFile(path, marshaled, 0o644)
}
// Parses a text file using basic find and replace. This is a highly inefficient method of
// scanning a file and performing a replacement. You should attempt to use anything other
// than this function where possible.
func (f *ConfigurationFile) parseTextFile(file *os.File) error {
b := bytes.NewBuffer(nil)
s := bufio.NewScanner(file)
var replaced bool
for s.Scan() {
line := s.Bytes()
replaced = false
func (f *ConfigurationFile) parseTextFile(path string) error {
input, err := os.ReadFile(path)
if err != nil {
return err
}
lines := strings.Split(string(input), "\n")
for i, line := range lines {
for _, replace := range f.Replace {
// If this line doesn't match what we expect for the replacement, move on to the next
// line. Otherwise, update the line to have the replacement value.
if !bytes.HasPrefix(line, []byte(replace.Match)) {
if !strings.HasPrefix(line, replace.Match) {
continue
}
b.Write(replace.ReplaceWith.Bytes())
replaced = true
lines[i] = replace.ReplaceWith.String()
}
if !replaced {
b.Write(line)
}
b.WriteByte('\n')
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
if err := file.Truncate(0); err != nil {
if err := os.WriteFile(path, []byte(strings.Join(lines, "\n")), 0o644); err != nil {
return err
}
// Write the data to the file.
if _, err := io.Copy(file, b); err != nil {
return errors.Wrap(err, "parser: failed to write properties file to disk")
}
return nil
}
@@ -518,29 +501,31 @@ func (f *ConfigurationFile) parseTextFile(file *os.File) error {
//
// @see https://github.com/pterodactyl/panel/issues/2308 (original)
// @see https://github.com/pterodactyl/panel/issues/3009 ("bug" introduced as result)
func (f *ConfigurationFile) parsePropertiesFile(file *os.File) error {
b, err := io.ReadAll(file)
if err != nil {
return err
}
s := bytes.NewBuffer(nil)
scanner := bufio.NewScanner(bytes.NewReader(b))
// Scan until we hit a line that is not a comment that actually has content
// on it. Keep appending the comments until that time.
for scanner.Scan() {
text := scanner.Bytes()
if len(text) > 0 && text[0] != '#' {
break
func (f *ConfigurationFile) parsePropertiesFile(path string) error {
var s strings.Builder
// Open the file and attempt to load any comments that currently exist at the start
// of the file. This is kind of a hack, but should work for a majority of users for
// the time being.
if fd, err := os.Open(path); err != nil {
return errors.Wrap(err, "parser: could not open file for reading")
} else {
scanner := bufio.NewScanner(fd)
// Scan until we hit a line that is not a comment that actually has content
// on it. Keep appending the comments until that time.
for scanner.Scan() {
text := scanner.Text()
if len(text) > 0 && text[0] != '#' {
break
}
s.WriteString(text + "\n")
}
_ = fd.Close()
if err := scanner.Err(); err != nil {
return errors.WithStackIf(err)
}
s.Write(text)
s.WriteByte('\n')
}
if err := scanner.Err(); err != nil {
return errors.WithStackIf(err)
}
p, err := properties.Load(b, properties.UTF8)
p, err := properties.LoadFile(path, properties.UTF8)
if err != nil {
return errors.Wrap(err, "parser: could not load properties file for configuration update")
}
@@ -578,16 +563,17 @@ func (f *ConfigurationFile) parsePropertiesFile(file *os.File) error {
s.WriteString(key + "=" + strings.Trim(strconv.QuoteToASCII(value), "\"") + "\n")
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
if err := file.Truncate(0); err != nil {
// Open the file for writing.
w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
if err != nil {
return err
}
defer w.Close()
// Write the data to the file.
if _, err := io.Copy(file, s); err != nil {
if _, err := w.Write([]byte(s.String())); err != nil {
return errors.Wrap(err, "parser: failed to write properties file to disk")
}
return nil
}

View File

@@ -3,7 +3,6 @@ package remote
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
@@ -16,6 +15,7 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/cenkalti/backoff/v4"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/system"
)

View File

@@ -2,11 +2,11 @@ package remote
import (
"bytes"
"encoding/json"
"regexp"
"strings"
"github.com/apex/log"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/parser"
)

View File

@@ -2,7 +2,6 @@ package downloader
import (
"context"
"encoding/json"
"fmt"
"io"
"mime"
@@ -15,63 +14,26 @@ import (
"time"
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/pterodactyl/wings/server"
)
var client *http.Client
func init() {
dialer := &net.Dialer{
LocalAddr: nil,
}
trnspt := http.DefaultTransport.(*http.Transport).Clone()
trnspt.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
c, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, errors.WithStack(err)
}
ipStr, _, err := net.SplitHostPort(c.RemoteAddr().String())
if err != nil {
return c, errors.WithStack(err)
}
ip := net.ParseIP(ipStr)
if ip == nil {
return c, errors.WithStack(ErrInvalidIPAddress)
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
return c, errors.WithStack(ErrInternalResolution)
}
for _, block := range internalRanges {
if !block.Contains(ip) {
continue
}
return c, errors.WithStack(ErrInternalResolution)
}
return c, nil
}
client = &http.Client{
Timeout: time.Hour * 12,
Transport: trnspt,
// Disallow any redirect on an HTTP call. This is a security requirement: do not modify
// this logic without first ensuring that the new target location IS NOT within the current
// instance's local network.
//
// This specific error response just causes the client to not follow the redirect and
// returns the actual redirect response to the caller. Not perfect, but simple and most
// people won't be using URLs that redirect anyways hopefully?
//
// We'll re-evaluate this down the road if needed.
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
// client is the shared HTTP client used for all remote downloads. It is
// package-scoped and long-lived so TCP/TLS connections can be reused.
var client = &http.Client{
	// Downloads of large files can legitimately take a very long time, so
	// allow up to 12 hours before a request is abandoned.
	Timeout: time.Hour * 12,
	// Disallow any redirect on an HTTP call. This is a security requirement: do not modify
	// this logic without first ensuring that the new target location IS NOT within the current
	// instance's local network.
	//
	// This specific error response just causes the client to not follow the redirect and
	// returns the actual redirect response to the caller. Not perfect, but simple and most
	// people won't be using URLs that redirect anyways hopefully?
	//
	// We'll re-evaluate this down the road if needed.
	CheckRedirect: func(req *http.Request, via []*http.Request) error {
		return http.ErrUseLastResponse
	},
}
var instance = &Downloader{
@@ -181,6 +143,12 @@ func (dl *Download) Execute() error {
dl.cancelFunc = &cancel
defer dl.Cancel()
// Always ensure that we're checking the destination for the download to avoid a malicious
// user from accessing internal network resources.
if err := dl.isExternalNetwork(ctx); err != nil {
return err
}
// At this point we have verified the destination is not within the local network, so we can
// now make a request to that URL and pull down the file, saving it to the server's data
// directory.
@@ -199,8 +167,13 @@ func (dl *Download) Execute() error {
return errors.New("downloader: got bad response status from endpoint: " + res.Status)
}
if res.ContentLength < 1 {
return errors.New("downloader: request is missing ContentLength")
// If there is a Content-Length header on this request go ahead and check that we can
// even write the whole file before beginning this process. If there is no header present
// we'll just have to give it a spin and see how it goes.
if res.ContentLength > 0 {
if err := dl.server.Filesystem().HasSpaceFor(res.ContentLength); err != nil {
return errors.WrapIf(err, "downloader: failed to write file: not enough space")
}
}
if dl.req.UseHeader {
@@ -227,10 +200,8 @@ func (dl *Download) Execute() error {
p := dl.Path()
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
// Write the file while tracking the progress, Write will check that the
// size of the file won't exceed the disk limit.
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
if err := dl.server.Filesystem().Write(p, r, res.ContentLength, 0o644); err != nil {
if err := dl.server.Filesystem().Writefile(p, r); err != nil {
return errors.WrapIf(err, "downloader: failed to write file to server directory")
}
return nil
@@ -275,6 +246,59 @@ func (dl *Download) counter(contentLength int64) *Counter {
}
}
// Verifies that a given download resolves to a location not within the current local
// network for the machine. If the final destination of a resource is within the local
// network an ErrInternalResolution error is returned.
func (dl *Download) isExternalNetwork(ctx context.Context) error {
dialer := &net.Dialer{
LocalAddr: nil,
}
host := dl.req.URL.Host
// This cluster-fuck of math and integer shit converts an integer IP into a proper IPv4.
// For example: 16843009 would become 1.1.1.1
//if i, err := strconv.ParseInt(host, 10, 64); err == nil {
// host = strconv.FormatInt((i>>24)&0xFF, 10) + "." + strconv.FormatInt((i>>16)&0xFF, 10) + "." + strconv.FormatInt((i>>8)&0xFF, 10) + "." + strconv.FormatInt(i&0xFF, 10)
//}
if _, _, err := net.SplitHostPort(host); err != nil {
if !strings.Contains(err.Error(), "missing port in address") {
return errors.WithStack(err)
}
switch dl.req.URL.Scheme {
case "http":
host += ":80"
case "https":
host += ":443"
}
}
c, err := dialer.DialContext(ctx, "tcp", host)
if err != nil {
return errors.WithStack(err)
}
_ = c.Close()
ipStr, _, err := net.SplitHostPort(c.RemoteAddr().String())
if err != nil {
return errors.WithStack(err)
}
ip := net.ParseIP(ipStr)
if ip == nil {
return errors.WithStack(ErrInvalidIPAddress)
}
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
return errors.WithStack(ErrInternalResolution)
}
for _, block := range internalRanges {
if block.Contains(ip) {
return errors.WithStack(ErrInternalResolution)
}
}
return nil
}
// Downloader represents a global downloader that keeps track of all currently processing downloads
// for the machine.
type Downloader struct {

View File

@@ -168,6 +168,7 @@ func RequireAuthorization() gin.HandlerFunc {
// We don't put this value outside this function since the node's authentication
// token can be changed on the fly and the config.Get() call returns a copy, so
// if it is rotated this value will never properly get updated.
token := config.Get().AuthenticationToken
auth := strings.SplitN(c.GetHeader("Authorization"), " ", 2)
if len(auth) != 2 || auth[0] != "Bearer" {
c.Header("WWW-Authenticate", "Bearer")
@@ -178,7 +179,7 @@ func RequireAuthorization() gin.HandlerFunc {
// All requests to Wings must be authorized with the authentication token present in
// the Wings configuration file. Remember, all requests to Wings come from the Panel
// backend, or using a signed JWT for temporary authentication.
if subtle.ConstantTimeCompare([]byte(auth[1]), []byte(config.Get().Token.Token)) != 1 {
if subtle.ConstantTimeCompare([]byte(auth[1]), []byte(token)) != 1 {
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "You are not authorized to access this endpoint."})
return
}

View File

@@ -118,9 +118,9 @@ func (re *RequestError) asFilesystemError() (int, string) {
return 0, ""
}
if filesystem.IsErrorCode(err, filesystem.ErrNotExist) ||
filesystem.IsPathError(err) ||
filesystem.IsLinkError(err) {
return http.StatusNotFound, "The requested file or folder does not exist on the system."
filesystem.IsErrorCode(err, filesystem.ErrCodePathResolution) ||
strings.Contains(err.Error(), "resolves to a location outside the server root") {
return http.StatusNotFound, "The requested resources was not found on the system."
}
if filesystem.IsErrorCode(err, filesystem.ErrCodeDenylistFile) || strings.Contains(err.Error(), "filesystem: file access prohibited") {
return http.StatusForbidden, "This file cannot be modified: present in egg denylist."

View File

@@ -62,7 +62,6 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {
protected.GET("/api/servers", getAllServers)
protected.POST("/api/servers", postCreateServer)
protected.DELETE("/api/transfers/:server", deleteTransfer)
protected.POST("/api/deauthorize-user", postDeauthorizeUser)
// These are server specific routes, and require that the request be authorized, and
// that the server exist on the Daemon.

View File

@@ -8,7 +8,6 @@ import (
"strconv"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/tokens"
@@ -20,14 +19,12 @@ func getDownloadBackup(c *gin.Context) {
client := middleware.ExtractApiClient(c)
manager := middleware.ExtractManager(c)
// Get the payload from the token.
token := tokens.BackupPayload{}
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
middleware.CaptureAndAbort(c, err)
return
}
// Get the server using the UUID from the token.
if _, ok := manager.Get(token.ServerUuid); !ok || !token.IsUniqueRequest() {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested resource was not found on this server.",
@@ -35,14 +32,6 @@ func getDownloadBackup(c *gin.Context) {
return
}
// Validate that the BackupUuid field is actually a UUID and not some random characters or a
// file path.
if _, err := uuid.Parse(token.BackupUuid); err != nil {
middleware.CaptureAndAbort(c, err)
return
}
// Locate the backup on the local disk.
b, st, err := backup.LocateLocal(client, token.BackupUuid)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
@@ -56,8 +45,6 @@ func getDownloadBackup(c *gin.Context) {
return
}
// The use of `os` here is safe as backups are not stored within server access
// directories, and this path is program-controlled, not user input.
f, err := os.Open(b.Path())
if err != nil {
middleware.CaptureAndAbort(c, err)
@@ -89,19 +76,27 @@ func getDownloadFile(c *gin.Context) {
return
}
f, st, err := s.Filesystem().File(token.FilePath)
p, _ := s.Filesystem().SafePath(token.FilePath)
st, err := os.Stat(p)
// If there is an error or we're somehow trying to download a directory, just
// respond with the appropriate error.
if err != nil {
middleware.CaptureAndAbort(c, err)
return
}
defer f.Close()
if st.IsDir() {
} else if st.IsDir() {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested resource was not found on this server.",
})
return
}
f, err := os.Open(p)
if err != nil {
middleware.CaptureAndAbort(c, err)
return
}
defer f.Close()
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
c.Header("Content-Type", "application/octet-stream")

View File

@@ -225,18 +225,13 @@ func deleteServer(c *gin.Context) {
// done in a separate process since failure is not the end of the world and can be
// manually cleaned up after the fact.
//
// In addition, servers with large numbers of files can take some time to finish deleting,
// In addition, servers with large amounts of files can take some time to finish deleting,
// so we don't want to block the HTTP call while waiting on this.
p := s.Filesystem().Path()
go func(p string) {
if err := os.RemoveAll(p); err != nil {
log.WithFields(log.Fields{"path": p, "error": err}).Warn("failed to remove server files during deletion process")
}
}(p)
if err := s.Filesystem().Close(); err != nil {
log.WithFields(log.Fields{"server": s.ID(), "error": err}).Warn("failed to close filesystem root")
}
}(s.Filesystem().Path())
middleware.ExtractManager(c).Remove(func(server *server.Server) bool {
return server.ID() == s.ID()
@@ -251,8 +246,6 @@ func deleteServer(c *gin.Context) {
// Adds any of the JTIs passed through in the body to the deny list for the websocket
// preventing any JWT generated before the current time from being used to connect to
// the socket or send along commands.
//
// deprecated: prefer /api/deauthorize-user
func postServerDenyWSTokens(c *gin.Context) {
var data struct {
JTIs []string `json:"jtis"`

View File

@@ -30,7 +30,8 @@ import (
// getServerFileContents returns the contents of a file on the server.
func getServerFileContents(c *gin.Context) {
s := middleware.ExtractServer(c)
f, st, err := s.Filesystem().File(c.Query("file"))
p := "/" + strings.TrimLeft(c.Query("file"), "/")
f, st, err := s.Filesystem().File(p)
if err != nil {
middleware.CaptureAndAbort(c, err)
return
@@ -238,16 +239,7 @@ func postServerWriteFile(c *gin.Context) {
middleware.CaptureAndAbort(c, err)
return
}
// A content length of -1 means the actual length is unknown.
if c.Request.ContentLength == -1 {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Missing Content-Length",
})
return
}
if err := s.Filesystem().Write(f, c.Request.Body, c.Request.ContentLength, 0o644); err != nil {
if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil {
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Cannot write file, name conflicts with an existing directory by the same name.",
@@ -421,15 +413,9 @@ func postServerCompressFiles(c *gin.Context) {
return
}
f, err := s.Filesystem().CompressFiles(c.Request.Context(), data.RootPath, data.Files)
f, err := s.Filesystem().CompressFiles(data.RootPath, data.Files)
if err != nil {
if errors.Is(err, filesystem.ErrNoSpaceAvailable) {
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
"error": "This server does not have enough available disk space to generate a compressed archive.",
})
} else {
middleware.CaptureAndAbort(c, err)
}
middleware.CaptureAndAbort(c, err)
return
}
@@ -453,6 +439,17 @@ func postServerDecompressFiles(c *gin.Context) {
s := middleware.ExtractServer(c)
lg := middleware.ExtractLogger(c).WithFields(log.Fields{"root_path": data.RootPath, "file": data.File})
lg.Debug("checking if space is available for file decompression")
err := s.Filesystem().SpaceAvailableForDecompression(context.Background(), data.RootPath, data.File)
if err != nil {
if filesystem.IsErrorCode(err, filesystem.ErrCodeUnknownArchive) {
lg.WithField("error", err).Warn("failed to decompress file: unknown archive format")
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "The archive provided is in a format Wings does not understand."})
return
}
middleware.CaptureAndAbort(c, err)
return
}
lg.Info("starting file decompression")
if err := s.Filesystem().DecompressFile(context.Background(), data.RootPath, data.File); err != nil {
@@ -592,9 +589,15 @@ func postServerUploadFiles(c *gin.Context) {
}
for _, header := range headers {
p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename))
if err != nil {
middleware.CaptureAndAbort(c, err)
return
}
// We run this in a different method so I can use defer without any of
// the consequences caused by calling it in a loop.
if err := handleFileUpload(filepath.Join(directory, header.Filename), s, header); err != nil {
if err := handleFileUpload(p, s, header); err != nil {
middleware.CaptureAndAbort(c, err)
return
} else {
@@ -616,7 +619,7 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader)
if err := s.Filesystem().IsIgnored(p); err != nil {
return err
}
if err := s.Filesystem().Write(p, file, header.Size, 0o644); err != nil {
if err := s.Filesystem().Writefile(p, file); err != nil {
return err
}
return nil

View File

@@ -2,17 +2,14 @@ package router
import (
"context"
"encoding/json"
"net/http"
"time"
"emperror.dev/errors"
"github.com/gin-gonic/gin"
"github.com/goccy/go-json"
ws "github.com/gorilla/websocket"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/websocket"
"github.com/pterodactyl/wings/server"
"golang.org/x/time/rate"
)
var expectedCloseCodes = []int{
@@ -28,27 +25,6 @@ func getServerWebsocket(c *gin.Context) {
manager := middleware.ExtractManager(c)
s, _ := manager.Get(c.Param("server"))
// Limit the total number of websockets that can be opened at any one time for
// a server instance. This applies across all users connected to the server, and
// is not applied on a per-user basis.
//
// todo: it would be great to make this per-user instead, but we need to modify
// how we even request this endpoint in order for that to be possible. Some type
// of signed identifier in the URL that is verified on this end and set by the
// panel using a shared secret is likely the easiest option. The benefit of that
// is that we can both scope things to the user before authentication, and also
// verify that the JWT provided by the panel is assigned to the same user.
if s.Websockets().Len() >= 30 {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Too many open websocket connections.",
})
return
}
c.Header("Content-Security-Policy", "default-src 'self'")
c.Header("X-Frame-Options", "DENY")
// Create a context that can be canceled when the user disconnects from this
// socket that will also cancel listeners running in separate threads. If the
// connection itself is terminated listeners using this context will also be
@@ -61,61 +37,36 @@ func getServerWebsocket(c *gin.Context) {
middleware.CaptureAndAbort(c, err)
return
}
defer handler.Connection.Close()
// Track this open connection on the server so that we can close them all programmatically
// if the server is deleted.
s.Websockets().Push(handler.Uuid(), &cancel)
handler.Logger().Debug("opening connection to server websocket")
defer s.Websockets().Remove(handler.Uuid())
go func() {
select {
// When the main context is canceled (through disconnect, server deletion, or server
// suspension) close the connection itself.
case <-ctx.Done():
handler.Logger().Debug("closing connection to server websocket")
if err := handler.Connection.Close(); err != nil {
handler.Logger().WithError(err).Error("failed to close websocket connection")
}
break
}
defer func() {
s.Websockets().Remove(handler.Uuid())
handler.Logger().Debug("closing connection to server websocket")
}()
// If the server is deleted we need to send a close message to the connected client
// so that they disconnect since there will be no more events sent along. Listen for
// the request context being closed to break this loop, otherwise this routine will
// be left hanging in the background.
go func() {
select {
case <-ctx.Done():
return
// If the server is deleted we need to send a close message to the connected client
// so that they disconnect since there will be no more events sent along. Listen for
// the request context being closed to break this loop, otherwise this routine will
// be left hanging in the background.
break
case <-s.Context().Done():
cancel()
_ = handler.Connection.WriteControl(ws.CloseMessage, ws.FormatCloseMessage(ws.CloseGoingAway, "server deleted"), time.Now().Add(time.Second*5))
break
}
}()
// Due to how websockets are handled we need to connect to the socket
// and _then_ abort it if the server is suspended. You cannot capture
// the HTTP response in the websocket client, thus we connect and then
// immediately close with failure.
if s.IsSuspended() {
_ = handler.Connection.WriteMessage(ws.CloseMessage, ws.FormatCloseMessage(4409, "server is suspended"))
return
}
// There is a separate rate limiter that applies to individual message types
// within the actual websocket logic handler. _This_ rate limiter just exists
// to avoid enormous floods of data through the socket since we need to parse
// JSON each time. This rate limit realistically should never be hit since this
// would require sending 50+ messages a second over the websocket (no more than
// 10 per 200ms).
var throttled bool
rl := rate.NewLimiter(rate.Every(time.Millisecond*200), 10)
for {
t, p, err := handler.Connection.ReadMessage()
j := websocket.Message{}
_, p, err := handler.Connection.ReadMessage()
if err != nil {
if ws.IsUnexpectedCloseError(err, expectedCloseCodes...) {
handler.Logger().WithField("error", err).Warn("error handling websocket message for server")
@@ -123,39 +74,16 @@ func getServerWebsocket(c *gin.Context) {
break
}
if !rl.Allow() {
if !throttled {
throttled = true
_ = handler.Connection.WriteJSON(websocket.Message{Event: websocket.ThrottledEvent, Args: []string{"global"}})
}
continue
}
throttled = false
// If the message isn't a format we expect, or the length of the message is far larger
// than we'd ever expect, drop it. The websocket upgrader logic does enforce a maximum
// _compressed_ message size of 4Kb but that could decompress to a much larger amount
// of data.
if t != ws.TextMessage || len(p) > 32_768 {
continue
}
// Discard and JSON parse errors into the void and don't continue processing this
// specific socket request. If we did a break here the client would get disconnected
// from the socket, which is NOT what we want to do.
var j websocket.Message
if err := json.Unmarshal(p, &j); err != nil {
continue
}
go func(msg websocket.Message) {
if err := handler.HandleInbound(ctx, msg); err != nil {
if errors.Is(err, server.ErrSuspended) {
cancel()
} else {
_ = handler.SendErrorJson(msg, err)
}
_ = handler.SendErrorJson(msg, err)
}
}(j)
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/router/middleware"
@@ -114,21 +113,9 @@ func postCreateServer(c *gin.Context) {
c.Status(http.StatusAccepted)
}
type postUpdateConfigurationResponse struct {
Applied bool `json:"applied"`
}
// Updates the running configuration for this Wings instance.
func postUpdateConfiguration(c *gin.Context) {
cfg := config.Get()
if cfg.IgnorePanelConfigUpdates {
c.JSON(http.StatusOK, postUpdateConfigurationResponse{
Applied: false,
})
return
}
if err := c.BindJSON(&cfg); err != nil {
return
}
@@ -152,38 +139,5 @@ func postUpdateConfiguration(c *gin.Context) {
// Since we wrote it to the disk successfully now update the global configuration
// state to use this new configuration struct.
config.Set(cfg)
c.JSON(http.StatusOK, postUpdateConfigurationResponse{
Applied: true,
})
}
func postDeauthorizeUser(c *gin.Context) {
var data struct {
User string `json:"user"`
Servers []string `json:"servers"`
}
if err := c.BindJSON(&data); err != nil {
return
}
// todo: disconnect websockets more gracefully
m := middleware.ExtractManager(c)
if len(data.Servers) > 0 {
for _, uuid := range data.Servers {
if s, ok := m.Get(uuid); ok {
tokens.DenyForServer(s.ID(), data.User)
s.Websockets().CancelAll()
s.Sftp().Cancel(data.User)
}
}
} else {
for _, s := range m.All() {
tokens.DenyForServer(s.ID(), data.User)
s.Websockets().CancelAll()
s.Sftp().Cancel(data.User)
}
}
c.Status(http.StatusNoContent)
}

View File

@@ -106,11 +106,8 @@ func postTransfers(c *gin.Context) {
if !successful && err != nil {
// Delete all extracted files.
go func(trnsfr *transfer.Transfer) {
_ = trnsfr.Server.Filesystem().Close()
if err := os.RemoveAll(trnsfr.Server.Filesystem().Path()); err != nil {
if !errors.Is(err, os.ErrNotExist) {
trnsfr.Log().WithError(err).Warn("failed to delete local server files")
}
if err := os.RemoveAll(trnsfr.Server.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
trnsfr.Log().WithError(err).Warn("failed to delete local server files")
}
}(trnsfr)
}
@@ -168,7 +165,7 @@ out:
case "archive":
trnsfr.Log().Debug("received archive")
if _, err := trnsfr.Server.EnsureDataDirectoryExists(); err != nil {
if err := trnsfr.Server.EnsureDataDirectoryExists(); err != nil {
middleware.CaptureAndAbort(c, err)
return
}

View File

@@ -24,29 +24,16 @@ var wingsBootTime = time.Now()
// This is used to allow the Panel to revoke tokens en-masse for a given user & server
// combination since the JTI for tokens is just MD5(user.id + server.uuid). When a server
// is booted this listing is fetched from the panel and the Websocket is dynamically updated.
//
// deprecated: prefer use of userDenylist
var denylist sync.Map
var userDenylist sync.Map
// Adds a JTI to the denylist by marking any JWTs generated before the current time as
// being invalid if they use the same JTI.
//
// deprecated: prefer the use of DenyForServer
func DenyJTI(jti string) {
log.WithField("jti", jti).Debugf("adding \"%s\" to JTI denylist", jti)
denylist.Store(jti, time.Now())
}
// DenyForServer adds a user UUID to the denylist marking any existing JWTs issued
// to the user as being invalid. This is associated with the user.
func DenyForServer(s string, u string) {
log.WithField("user_uuid", u).WithField("server_uuid", s).Debugf("denying all JWTs created at or before current time for user \"%s\"", u)
userDenylist.Store(strings.Join([]string{s, u}, ":"), time.Now())
}
// WebsocketPayload defines the JWT payload for a websocket connection. This JWT is passed along to
// the websocket after it has been connected to by sending an "auth" event.
type WebsocketPayload struct {
@@ -92,21 +79,12 @@ func (p *WebsocketPayload) Denylisted() bool {
// Finally, if the token was issued before a time that is currently denied for this
// token instance, ignore the permissions response.
//
// This list is deprecated, but we maintain the check here so that custom instances
// are able to continue working. We'll remove it in a future release.
if t, ok := denylist.Load(p.JWTID); ok {
if p.IssuedAt.Time.Before(t.(time.Time)) {
return true
}
}
if t, ok := userDenylist.Load(strings.Join([]string{p.ServerUUID, p.UserUUID}, ":")); ok {
if p.IssuedAt.Time.Before(t.(time.Time)) {
return true
}
}
return false
}

View File

@@ -1,91 +0,0 @@
package websocket
import (
"sync"
"time"
"golang.org/x/time/rate"
)
type LimiterBucket struct {
mu sync.RWMutex
limits map[Event]*rate.Limiter
throttles map[Event]bool
}
func (h *Handler) IsThrottled(e Event) bool {
l := h.limiter.For(e)
h.limiter.mu.Lock()
defer h.limiter.mu.Unlock()
if l.Allow() {
h.limiter.throttles[e] = false
return false
}
// If not allowed, track the throttling and send an event over the wire
// if one wasn't already sent in the same throttling period.
if v, ok := h.limiter.throttles[e]; !v || !ok {
h.limiter.throttles[e] = true
h.Logger().WithField("event", e).Debug("throttling websocket due to event volume")
_ = h.unsafeSendJson(&Message{Event: ThrottledEvent, Args: []string{string(e)}})
}
return true
}
func NewLimiter() *LimiterBucket {
return &LimiterBucket{
limits: make(map[Event]*rate.Limiter, 4),
throttles: make(map[Event]bool, 4),
}
}
// For returns the internal rate limiter for the given event type. In most
// cases this is a shared rate limiter for events, but certain "heavy" or low-frequency
// events implement their own limiters.
func (l *LimiterBucket) For(e Event) *rate.Limiter {
name := limiterName(e)
l.mu.RLock()
if v, ok := l.limits[name]; ok {
l.mu.RUnlock()
return v
}
l.mu.RUnlock()
l.mu.Lock()
defer l.mu.Unlock()
limit, burst := limitValuesFor(e)
l.limits[name] = rate.NewLimiter(limit, burst)
return l.limits[name]
}
// limitValuesFor returns the underlying limit and burst value for the given event.
func limitValuesFor(e Event) (rate.Limit, int) {
// Twice every five seconds.
if e == AuthenticationEvent || e == SendServerLogsEvent {
return rate.Every(time.Second * 5), 2
}
// 10 per second.
if e == SendCommandEvent {
return rate.Every(time.Second), 10
}
// 4 per second.
return rate.Every(time.Second), 4
}
func limiterName(e Event) Event {
if e == AuthenticationEvent || e == SendServerLogsEvent || e == SendCommandEvent {
return e
}
return "_default"
}

View File

@@ -2,11 +2,11 @@ package websocket
import (
"context"
"encoding/json"
"sync"
"time"
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
@@ -131,7 +131,7 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
continue
}
var sendErr error
message := Message{Event: Event(e.Topic)}
message := Message{Event: e.Topic}
if str, ok := e.Data.(string); ok {
message.Args = []string{str}
} else if b, ok := e.Data.([]byte); ok {
@@ -149,7 +149,7 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
continue
}
}
onError(string(message.Event), sendErr)
onError(message.Event, sendErr)
}
break
}

View File

@@ -1,24 +1,21 @@
package websocket
type Event string
const (
AuthenticationSuccessEvent = Event("auth success")
TokenExpiringEvent = Event("token expiring")
TokenExpiredEvent = Event("token expired")
AuthenticationEvent = Event("auth")
SetStateEvent = Event("set state")
SendServerLogsEvent = Event("send logs")
SendCommandEvent = Event("send command")
SendStatsEvent = Event("send stats")
ErrorEvent = Event("daemon error")
JwtErrorEvent = Event("jwt error")
ThrottledEvent = Event("throttled")
AuthenticationSuccessEvent = "auth success"
TokenExpiringEvent = "token expiring"
TokenExpiredEvent = "token expired"
AuthenticationEvent = "auth"
SetStateEvent = "set state"
SendServerLogsEvent = "send logs"
SendCommandEvent = "send command"
SendStatsEvent = "send stats"
ErrorEvent = "daemon error"
JwtErrorEvent = "jwt error"
)
type Message struct {
// The event to perform.
Event Event `json:"event"`
Event string `json:"event"`
// The data to pass along, only used by power/command currently. Other requests
// should either omit the field or pass an empty value as it is ignored.

View File

@@ -2,20 +2,21 @@ package websocket
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3"
"github.com/gin-gonic/gin"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/system"
@@ -45,7 +46,6 @@ type Handler struct {
server *server.Server
ra server.RequestActivity
uuid uuid.UUID
limiter *LimiterBucket
}
var (
@@ -84,7 +84,6 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
// GetHandler returns a new websocket handler using the context provided.
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin.Context) (*Handler, error) {
upgrader := websocket.Upgrader{
EnableCompression: true,
// Ensure that the websocket request is originating from the Panel itself,
// and not some other location.
CheckOrigin: func(r *http.Request) bool {
@@ -111,16 +110,12 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin
return nil, err
}
conn.SetReadLimit(4096)
_ = conn.SetCompressionLevel(5)
return &Handler{
Connection: conn,
jwt: nil,
server: s,
ra: s.NewRequestActivity("", c.ClientIP()),
uuid: u,
limiter: NewLimiter(),
}, nil
}
@@ -155,7 +150,7 @@ func (h *Handler) SendJson(v Message) error {
// If the user does not have permission to see backup events, do not emit
// them over the socket.
if strings.HasPrefix(string(v.Event), server.BackupCompletedEvent) {
if strings.HasPrefix(v.Event, server.BackupCompletedEvent) {
if !j.HasPermission(PermissionReceiveBackups) {
return nil
}
@@ -282,14 +277,6 @@ func (h *Handler) setJwt(token *tokens.WebsocketPayload) {
// HandleInbound handles an inbound socket request and route it to the proper action.
func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
if h.server.IsSuspended() {
return server.ErrSuspended
}
if h.IsThrottled(m.Event) {
return nil
}
if m.Event != AuthenticationEvent {
if err := h.TokenValid(); err != nil {
h.unsafeSendJson(Message{

View File

@@ -153,29 +153,18 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
s.Log().Debug("starting file writing process for backup restoration")
err = b.Restore(s.Context(), reader, func(file string, info fs.FileInfo, r io.ReadCloser) error {
defer r.Close()
if file == "." {
return nil
}
s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
if info.IsDir() {
if err := s.Filesystem().Mkdir(file, info.Mode().Perm()); err != nil {
if !errors.Is(err, os.ErrExist) {
return errors.WithStack(err)
}
}
} else {
if !info.Mode().IsRegular() {
return nil
}
if err := s.Filesystem().Write(file, r, info.Size(), info.Mode().Perm()); err != nil {
return errors.WithStack(err)
}
if err := s.Filesystem().Writefile(file, r); err != nil {
return err
}
if err := s.Filesystem().Chmod(file, info.Mode()); err != nil {
return err
}
atime := info.ModTime()
return s.Filesystem().Chtimes(file, atime, atime)
mtime := atime
return s.Filesystem().Chtimes(file, atime, mtime)
})
return errors.WithStackIf(err)

View File

@@ -11,17 +11,16 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/mholt/archives"
"github.com/mholt/archiver/v4"
"golang.org/x/sync/errgroup"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
)
var format = archives.CompressedArchive{
Compression: archives.Gz{},
Archival: archives.Tar{},
Extraction: archives.Tar{},
var format = archiver.CompressedArchive{
Compression: archiver.Gz{},
Archival: archiver.Tar{},
}
type AdapterType string

View File

@@ -4,11 +4,10 @@ import (
"context"
"io"
"os"
"strings"
"emperror.dev/errors"
"github.com/juju/ratelimit"
"github.com/mholt/archives"
"github.com/mholt/archiver/v4"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
@@ -61,23 +60,13 @@ func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
// Generate generates a backup of the selected files and pushes it to the
// defined location for this instance.
func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
r, err := os.OpenRoot(basePath)
if err != nil {
return nil, errors.Wrap(err, "server/backup: failed to open root directory")
}
defer r.Close()
a, err := filesystem.NewArchive(r, "/", filesystem.WithIgnored(strings.Split(ignore, "\n")))
if err != nil {
return nil, errors.WrapIf(err, "server/backup: failed to create archive")
a := &filesystem.Archive{
BasePath: basePath,
Ignore: ignore,
}
b.log().WithField("path", b.Path()).Info("creating backup for server")
f, err := os.OpenFile(b.Path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
return nil, errors.Wrap(err, "server/backup: failed to open file for writing")
}
defer f.Close()
if err := a.Create(ctx, f); err != nil {
if err := a.Create(ctx, b.Path()); err != nil {
return nil, err
}
b.log().Info("created backup successfully")
@@ -104,14 +93,14 @@ func (b *LocalBackup) Restore(ctx context.Context, _ io.Reader, callback Restore
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
reader = ratelimit.Reader(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
}
if err := format.Extract(ctx, reader, func(ctx context.Context, f archives.FileInfo) error {
if err := format.Extract(ctx, reader, nil, func(ctx context.Context, f archiver.File) error {
r, err := f.Open()
if err != nil {
return err
}
defer r.Close()
return callback(f.NameInArchive, f.FileInfo, r)
return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
}); err != nil {
return err
}

View File

@@ -7,13 +7,12 @@ import (
"net/http"
"os"
"strconv"
"strings"
"time"
"emperror.dev/errors"
"github.com/cenkalti/backoff/v4"
"github.com/juju/ratelimit"
"github.com/mholt/archives"
"github.com/mholt/archiver/v4"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
@@ -52,33 +51,24 @@ func (s *S3Backup) WithLogContext(c map[string]interface{}) {
func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
defer s.Remove()
r, err := os.OpenRoot(basePath)
if err != nil {
return nil, errors.Wrap(err, "backup: failed to open root directory")
}
defer r.Close()
a, err := filesystem.NewArchive(r, "/", filesystem.WithIgnored(strings.Split(ignore, "\n")))
if err != nil {
return nil, errors.WrapIf(err, "backup: failed to create archive")
a := &filesystem.Archive{
BasePath: basePath,
Ignore: ignore,
}
s.log().WithField("path", s.Path()).Info("creating backup for server")
f, err := os.OpenFile(s.Path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
return nil, errors.Wrap(err, "backup: failed to open file for writing")
}
defer f.Close()
if err := a.Create(ctx, f); err != nil {
if err := a.Create(ctx, s.Path()); err != nil {
return nil, err
}
s.log().Info("created backup successfully")
_ = f.Sync()
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, errors.Wrap(err, "backup: failed to seek on file")
rc, err := os.Open(s.Path())
if err != nil {
return nil, errors.Wrap(err, "backup: could not read archive from disk")
}
defer rc.Close()
parts, err := s.generateRemoteRequest(ctx, f)
parts, err := s.generateRemoteRequest(ctx, rc)
if err != nil {
return nil, err
}
@@ -103,14 +93,14 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
reader = ratelimit.Reader(r, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
}
if err := format.Extract(ctx, reader, func(ctx context.Context, f archives.FileInfo) error {
if err := format.Extract(ctx, reader, nil, func(ctx context.Context, f archiver.File) error {
r, err := f.Open()
if err != nil {
return err
}
defer r.Close()
return callback(f.NameInArchive, f.FileInfo, r)
return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
}); err != nil {
return err
}
@@ -241,6 +231,7 @@ func (fu *s3FileUploader) uploadPart(ctx context.Context, part string, size int6
return nil
}, fu.backoff(ctx))
if err != nil {
if v, ok := err.(*backoff.PermanentError); ok {
return "", v.Unwrap()

View File

@@ -1,7 +1,6 @@
package server
import (
"os"
"runtime"
"github.com/gammazero/workerpool"
@@ -11,24 +10,26 @@ import (
// a server automatically to ensure that they always use the specified values.
func (s *Server) UpdateConfigurationFiles() {
pool := workerpool.New(runtime.NumCPU())
files := s.ProcessConfiguration().ConfigurationFiles
s.Log().Debug("acquiring process configuration files...")
files := s.ProcessConfiguration().ConfigurationFiles
s.Log().Debug("acquired process configuration files")
for _, cf := range files {
f := cf
pool.Submit(func() {
fd, err := s.Filesystem().Touch(f.FileName, os.O_RDWR|os.O_CREATE, 0o644)
p, err := s.Filesystem().SafePath(f.FileName)
if err != nil {
s.Log().WithField("file_name", f.FileName).WithField("error", err).Error("failed to open configuration file")
s.Log().WithField("error", err).Error("failed to generate safe path for configuration file")
return
}
defer fd.Close()
if err := f.Parse(fd); err != nil {
s.Log().WithField("error", err).WithField("file_name", f.FileName).Error("failed to parse and update server configuration file")
if err := f.Parse(p, false); err != nil {
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
}
s.Log().WithField("file_name", f.FileName).Debug("finished processing server configuration file")
s.Log().WithField("path", f.FileName).Debug("finished processing server configuration file")
})
}

View File

@@ -1,19 +0,0 @@
package server
import (
"github.com/pterodactyl/wings/system"
)
// Sftp returns the SFTP connection bag for the server instance. This bag tracks
// all open SFTP connections by individual user and allows for a single user or
// all users to be disconnected by other processes.
func (s *Server) Sftp() *system.ContextBag {
s.Lock()
defer s.Unlock()
if s.sftpBag == nil {
s.sftpBag = system.NewContextBag(s.Context())
}
return s.sftpBag
}

View File

@@ -37,7 +37,7 @@ func (s *Server) Throttler() *ConsoleThrottle {
s.throttler = newConsoleThrottle(throttles.Lines, period)
s.throttler.strike = func() {
s.PublishConsoleOutputFromDaemon("Server is outputting console data too quickly -- throttling...")
s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Server is outputting console data too quickly -- throttling..."))
}
})
return s.throttler

View File

@@ -11,7 +11,9 @@ import (
"sync"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/juju/ratelimit"
"github.com/karrick/godirwalk"
"github.com/klauspost/pgzip"
ignore "github.com/sabhiram/go-gitignore"
@@ -21,8 +23,6 @@ import (
const memory = 4 * 1024
var ErrNoSpaceAvailable = errors.Sentinel("archive: no space available on disk")
var pool = sync.Pool{
New: func() interface{} {
b := make([]byte, memory)
@@ -36,8 +36,7 @@ type TarProgress struct {
p *progress.Progress
}
// NewTarProgress returns a new progress writer for the tar file. This is a wrapper
// around the standard writer with a progress instance embedded.
// NewTarProgress .
func NewTarProgress(w *tar.Writer, p *progress.Progress) *TarProgress {
if p != nil {
p.Writer = w
@@ -56,79 +55,34 @@ func (p *TarProgress) Write(v []byte) (int, error) {
return p.p.Write(v)
}
type ArchiveOption func(a *Archive) error
type Archive struct {
root *os.Root
dir string
pw *TarProgress
ignored *ignore.GitIgnore
matching *ignore.GitIgnore
p *progress.Progress
// BasePath is the absolute path to create the archive from where Files and Ignore are
// relative to.
BasePath string
// Ignore is a gitignore string (most likely read from a file) of files to ignore
// from the archive.
Ignore string
// Files specifies the files to archive, this takes priority over the Ignore option, if
// unspecified, all files in the BasePath will be archived unless Ignore is set.
Files []string
// Progress wraps the writer of the archive to pass through the progress tracker.
Progress *progress.Progress
}
// NewArchive returns a new archive instance that can be used for generating an
// archive of files and folders within the provided os.Root. The "dir" value is
// a child directory within the `os.Root` instance.
func NewArchive(r *os.Root, dir string, opts ...ArchiveOption) (*Archive, error) {
a := &Archive{root: r, dir: dir}
for _, opt := range opts {
if err := opt(a); err != nil {
return nil, errors.Wrap(err, "server/filesystem: archive: failed to apply callback option")
}
// Create creates an archive at dst with all the files defined in the
// included Files array.
func (a *Archive) Create(ctx context.Context, dst string) error {
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return err
}
return a, nil
}
defer f.Close()
// WithProgress configures the progress tracker used to report how much of the
// archive has been written. Passing the option stores the tracker on the
// Archive; it is retrieved later via Archive.Progress.
func WithProgress(p *progress.Progress) ArchiveOption {
	return func(a *Archive) error {
		a.p = p
		return nil
	}
}
// WithIgnored configures the archive to skip any paths matching the provided
// gitignore-style lines. This option is mutually exclusive with WithMatching;
// applying both returns an error.
func WithIgnored(files []string) ArchiveOption {
	return func(a *Archive) error {
		if a.matching != nil {
			return errors.NewPlain("cannot create an archive with both ignored and matching configurations")
		}
		a.ignored = ignore.CompileIgnoreLines(files...)
		return nil
	}
}
// WithMatching limits the archive to only the provided file and directory
// paths (an allowlist, implemented as an inverted gitignore). This option is
// mutually exclusive with WithIgnored; applying both returns an error.
func WithMatching(files []string) ArchiveOption {
	return func(a *Archive) error {
		if a.ignored != nil {
			return errors.NewPlain("cannot create an archive with both ignored and matching configurations")
		}
		// Pre-allocate capacity only: using make([]string, len(files)) and then
		// appending would leave len(files) empty entries at the front of the
		// slice before the real values.
		lines := make([]string, 0, len(files))
		for _, f := range files {
			// The old archiver logic just accepted an array of paths to include in the
			// archive and did rudimentary logic to determine if they should be included.
			// This newer logic makes use of the gitignore (flipped to make it an allowlist),
			// but to do that we need to make sure all the provided values here start with a
			// slash; otherwise files/folders nested deeply might be unintentionally included.
			lines = append(lines, "/"+strings.TrimPrefix(f, "/"))
		}
		a.matching = ignore.CompileIgnoreLines(lines...)
		return nil
	}
}
// Progress returns the progress tracker configured for this archive via
// WithProgress, or nil if none was set.
func (a *Archive) Progress() *progress.Progress {
	return a.p
}
// Create .
func (a *Archive) Create(ctx context.Context, f *os.File) error {
// Select a writer based off of the WriteLimit configuration option. If there is no
// write limit use the file as the writer.
// write limit, use the file as the writer.
var writer io.Writer
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
// Token bucket with a capacity of "writeLimit" MiB, adding "writeLimit" MiB/s
@@ -141,8 +95,7 @@ func (a *Archive) Create(ctx context.Context, f *os.File) error {
return a.Stream(ctx, writer)
}
// Stream walks the given root directory and generates an archive from the
// provided files.
// Stream .
func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
// Choose which compression level to use based on the compression_level configuration option
var compressionLevel int
@@ -151,6 +104,8 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
compressionLevel = pgzip.NoCompression
case "best_compression":
compressionLevel = pgzip.BestCompression
case "best_speed":
fallthrough
default:
compressionLevel = pgzip.BestSpeed
}
@@ -164,55 +119,105 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
tw := tar.NewWriter(gw)
defer tw.Close()
a.pw = NewTarProgress(tw, a.p)
defer a.pw.Close()
pw := NewTarProgress(tw, a.Progress)
r, err := a.root.OpenRoot(normalize(a.dir))
if err != nil {
return errors.Wrap(err, "server/filesystem: archive: failed to acquire root dir instance")
// Configure godirwalk.
options := &godirwalk.Options{
FollowSymbolicLinks: false,
Unsorted: true,
}
defer r.Close()
base := strings.TrimRight(r.Name(), "./")
return filepath.WalkDir(base, a.walker(ctx, base))
// If we're specifically looking for only certain files, or have requested
// that certain files be ignored we'll update the callback function to reflect
// that request.
var callback godirwalk.WalkFunc
if len(a.Files) == 0 && len(a.Ignore) > 0 {
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
callback = a.callback(pw, func(_ string, rp string) error {
if i.MatchesPath(rp) {
return godirwalk.SkipThis
}
return nil
})
} else if len(a.Files) > 0 {
callback = a.withFilesCallback(pw)
} else {
callback = a.callback(pw)
}
// Set the callback function, wrapped with support for context cancellation.
options.Callback = func(path string, de *godirwalk.Dirent) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return callback(path, de)
}
}
// Recursively walk the path we are archiving.
return godirwalk.Walk(a.BasePath, options)
}
// Callback function used to determine if a given file should be included in the archive
// being generated.
func (a *Archive) walker(ctx context.Context, base string) fs.WalkDirFunc {
return func(path string, de fs.DirEntry, err error) error {
if ctx.Err() != nil {
return ctx.Err()
}
if err != nil {
return fs.SkipDir
}
path = strings.TrimPrefix(path, base)
if a.ignored != nil && a.ignored.MatchesPath(path) {
func (a *Archive) callback(tw *TarProgress, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
return func(path string, de *godirwalk.Dirent) error {
// Skip directories because we are walking them recursively.
if de.IsDir() {
return nil
}
if a.matching != nil && !a.matching.MatchesPath(path) {
return nil
relative := filepath.ToSlash(strings.TrimPrefix(path, a.BasePath+string(filepath.Separator)))
// Call the additional options passed to this callback function. If any of them return
// a non-nil error we will exit immediately.
for _, opt := range opts {
if err := opt(path, relative); err != nil {
return err
}
}
// Add the file to the archive, if it is nested in a directory,
// the directory will be automatically "created" in the archive.
return a.addToArchive(path)
return a.addToArchive(path, relative, tw)
}
}
// Pushes only files defined in the Files key to the final archive. A path is
// included when it is exactly one of the requested files, or when it is nested
// underneath one of the requested directories.
func (a *Archive) withFilesCallback(tw *TarProgress) func(path string, de *godirwalk.Dirent) error {
	return a.callback(tw, func(p string, rp string) error {
		for _, f := range a.Files {
			// Include the path if it is an exact match, or if it lives underneath
			// the requested entry treated as a directory. The trailing slash is
			// appended to the requested entry (not the candidate path) so that
			// sibling paths merely sharing a prefix are not matched — e.g.
			// selecting "test" must not also include "test_file.txt".
			if p != f && !strings.HasPrefix(p, strings.TrimSuffix(f, "/")+"/") {
				continue
			}
			// Once we have a match return a nil value here so that the loop stops and the
			// call to this function will correctly include the file in the archive. If there
			// are no matches we'll never make it to this line, and the final error returned
			// will be the godirwalk.SkipThis error.
			return nil
		}
		return godirwalk.SkipThis
	})
}
// Adds a given file path to the final archive being created.
func (a *Archive) addToArchive(p string) error {
p = normalize(p)
s, err := a.root.Lstat(p)
func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
// Lstat the file, this will give us the same information as Stat except that it will not
// follow a symlink to its target automatically. This is important to avoid including
// files that exist outside the server root unintentionally in the backup.
s, err := os.Lstat(p)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "server/filesystem: archive: failed to stat file")
return errors.WrapIff(err, "failed executing os.Lstat on '%s'", rp)
}
// Skip socket files as they are unsupported by archive/tar.
@@ -224,27 +229,34 @@ func (a *Archive) addToArchive(p string) error {
// Resolve the symlink target if the file is a symlink.
var target string
if s.Mode()&fs.ModeSymlink != 0 {
// This intentionally uses [os.Readlink] and not the [os.Root] instance. We need to
// know the actual target for the symlink, even if outside the server directory, so
// that we can restore it properly.
//
// This target is only used for the sake of keeping everything correct in the archive;
// we never read the target file contents.
target, err = os.Readlink(filepath.Join(a.root.Name(), p))
		// Read the target of the symlink. Errors here are logged but do not abort
		// the backup: broken or unreadable symlinks are common, and a single bad
		// link should not cause the entire archive to fail.
target, err = os.Readlink(s.Name())
if err != nil {
target = ""
// Ignore the not exist errors specifically, since there is nothing important about that.
if !os.IsNotExist(err) {
log.WithField("path", rp).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
}
return nil
}
}
// Get the tar FileInfoHeader to add the file to the archive.
header, err := tar.FileInfoHeader(s, target)
// Get the tar FileInfoHeader in order to add the file to the archive.
header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
if err != nil {
return errors.Wrap(err, "server/filesystem: archive: failed to get file info header")
return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", rp)
}
header.Name = p
if err := a.pw.WriteHeader(header); err != nil {
return errors.Wrap(err, "server/filesystem: archive: failed to write tar header")
// Fix the header name if the file is not a symlink.
if s.Mode()&fs.ModeSymlink == 0 {
header.Name = rp
}
// Write the tar FileInfoHeader to the archive.
if err := w.WriteHeader(header); err != nil {
return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", rp)
}
// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
@@ -265,17 +277,19 @@ func (a *Archive) addToArchive(p string) error {
}()
}
f, err := a.root.Open(p)
// Open the file.
f, err := os.Open(p)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "server/filesystem: archive: failed to open file for copying")
return errors.WrapIff(err, "failed to open '%s' for copying", header.Name)
}
defer f.Close()
if _, err := io.CopyBuffer(a.pw, io.LimitReader(f, header.Size), buf); err != nil {
return errors.Wrap(err, "server/filesystem: archive: failed to copy file to archive")
// Copy the file's contents to the archive using our buffer.
if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
}
return nil

View File

@@ -1,161 +0,0 @@
package filesystem
import (
"context"
iofs "io/fs"
"os"
"path/filepath"
"sort"
"strings"
"testing"
. "github.com/franela/goblin"
"github.com/mholt/archives"
)
// TestArchive_Stream verifies that generated archives contain exactly the
// requested files, and that content outside the server root (reached through
// symlinks) is never pulled into an archive.
func TestArchive_Stream(t *testing.T) {
	g := Goblin(t)
	fs := NewFs()

	g.Describe("Archive", func() {
		g.AfterEach(func() {
			fs.reset()
		})

		g.It("creates archive with intended files", func() {
			g.Assert(fs.CreateDirectory("test", "/")).IsNil()
			g.Assert(fs.CreateDirectory("test2", "/")).IsNil()

			err := fs.Writefile("test/file.txt", strings.NewReader("hello, world!\n"))
			g.Assert(err).IsNil()
			err = fs.Writefile("test2/file.txt", strings.NewReader("hello, world!\n"))
			g.Assert(err).IsNil()
			err = fs.Writefile("test_file.txt", strings.NewReader("hello, world!\n"))
			g.Assert(err).IsNil()
			err = fs.Writefile("test_file.txt.old", strings.NewReader("hello, world!\n"))
			g.Assert(err).IsNil()

			archivePath := filepath.Join(fs.rootPath, "../archive.tar.gz")
			f, err := os.Create(archivePath)
			if err != nil {
				panic(err)
			}
			defer f.Close()

			a, err := NewArchive(fs.root, ".", WithMatching([]string{"test", "test_file.txt"}))
			// The error from NewArchive must be checked before use; a nil
			// Archive here would panic on the Create call below.
			g.Assert(err).IsNil()
			g.Assert(a.Create(context.Background(), f)).IsNil()

			// Open the archive.
			genericFs, err := archives.FileSystem(context.Background(), archivePath, nil)
			g.Assert(err).IsNil()

			// Assert that we are opening an archive.
			afs, ok := genericFs.(iofs.ReadDirFS)
			g.Assert(ok).IsTrue()

			// Get the names of the files recursively from the archive.
			files, err := getFiles(afs, ".")
			g.Assert(err).IsNil()

			// Ensure the files in the archive match what we are expecting.
			expected := []string{
				"test_file.txt",
				"test/file.txt",
			}

			// Sort the slices to ensure the comparison never fails if the
			// contents are sorted differently.
			sort.Strings(expected)
			sort.Strings(files)

			g.Assert(files).Equal(expected)
		})

		g.It("does not archive files outside of root", func() {
			if err := os.MkdirAll(filepath.Join(fs.rootPath, "../outer"), 0o755); err != nil {
				panic(err)
			}

			fs.write("test.txt", []byte("test"))
			fs.write("../danger-1.txt", []byte("danger"))
			fs.write("../outer/danger-2.txt", []byte("danger"))

			if err := os.Symlink("../danger-1.txt", filepath.Join(fs.rootPath, "symlink.txt")); err != nil {
				panic(err)
			}
			if err := os.Symlink("../outer", filepath.Join(fs.rootPath, "danger-dir")); err != nil {
				panic(err)
			}

			archivePath := filepath.Join(fs.rootPath, "../archive.tar.gz")
			f, err := os.Create(archivePath)
			if err != nil {
				panic(err)
			}
			defer f.Close()

			a, err := NewArchive(fs.root, ".")
			if err != nil {
				panic(err)
			}
			err = a.Create(context.Background(), f)
			g.Assert(err).IsNil()

			// Open the archive.
			genericFs, err := archives.FileSystem(context.Background(), archivePath, nil)
			g.Assert(err).IsNil()

			// Assert that we are opening an archive.
			afs, ok := genericFs.(iofs.ReadDirFS)
			g.Assert(ok).IsTrue()

			// Get the names of the files recursively from the archive.
			files, err := getFiles(afs, ".")
			g.Assert(err).IsNil()

			// We expect the actual symlinks themselves, but not the contents of the directory
			// or the file itself. We're storing the symlinked file in the archive so that
			// expanding it back is the same, but you won't have the inner contents.
			g.Assert(files).Equal([]string{"danger-dir", "symlink.txt", "test.txt"})
		})
	})
}
// getFiles recursively collects the relative names of all regular files
// reachable from "name" within the provided filesystem. Directories are
// descended into but not listed themselves; an empty directory simply
// contributes no entries.
func getFiles(f iofs.ReadDirFS, name string) ([]string, error) {
	var v []string
	entries, err := f.ReadDir(name)
	if err != nil {
		return nil, err
	}
	for _, e := range entries {
		entryName := e.Name()
		// Keep top-level entries un-prefixed; nested entries get their parent
		// directory joined on.
		if name != "." {
			entryName = filepath.Join(name, entryName)
		}
		if e.IsDir() {
			files, err := getFiles(f, entryName)
			if err != nil {
				return nil, err
			}
			// Previously an empty subdirectory (files == nil) caused the entire
			// accumulated listing to be discarded by returning nil, nil here.
			// An empty directory should simply contribute nothing.
			v = append(v, files...)
			continue
		}
		v = append(v, entryName)
	}
	return v, nil
}

View File

@@ -1,82 +0,0 @@
package filesystem
import (
"fmt"
fs2 "io/fs"
"os"
"path/filepath"
"strings"
"time"
"emperror.dev/errors"
"github.com/pterodactyl/wings/config"
)
// Chmod sets the permission bits on the given path, which is interpreted
// relative to the server's root directory.
func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
	// Normalize to a root-relative path; an empty result means the root itself.
	p := strings.TrimLeft(filepath.Clean(path), "/")
	if p == "" {
		p = "."
	}
	err := fs.root.Chmod(p, mode)
	if err == nil {
		return nil
	}
	return errors.Wrap(err, "server/filesystem: chmod: failed to chmod path")
}
// Chown recursively iterates over a file or directory and sets the ownership
// on all the underlying files. If the target is a regular file only that file
// is chowned; if it is a directory, the tree is walked and every entry has its
// ownership updated as well.
func (fs *Filesystem) Chown(p string) error {
	p = normalize(p)

	// Ownership is always set to the daemon's configured user and group.
	uid := config.Get().System.User.Uid
	gid := config.Get().System.User.Gid

	if err := fs.root.Chown(p, uid, gid); err != nil {
		return errors.WrapIf(err, "server/filesystem: chown: failed to chown path")
	}

	// If this is not a directory, we can now return from the function; there is nothing
	// left that we need to do.
	if st, err := fs.root.Stat(p); err != nil || !st.IsDir() {
		if err == nil || errors.Is(err, os.ErrNotExist) {
			return nil
		}
		return errors.WrapIf(err, "server/filesystem: chown: failed to stat path")
	}

	// NOTE(review): when p != "." the walk below still starts at fs.rootPath,
	// i.e. the entire server root rather than just the requested subtree —
	// confirm this is intentional.
	rt := fs.rootPath
	if p == "." {
		r, err := fs.root.OpenRoot(p)
		if err != nil {
			return errors.WithStack(err)
		}
		defer r.Close()
		rt = r.Name()
	}

	// If this was a directory, begin walking over its contents recursively and ensure that all
	// the subfiles and directories get their permissions updated as well.
	return filepath.WalkDir(rt, func(path string, _ fs2.DirEntry, err error) error {
		// Surface walk failures rather than silently ignoring them and then
		// attempting to chown a path that could not even be read.
		if err != nil {
			return err
		}
		path = normalize(path)
		if path == "." {
			return nil
		}
		if err := fs.root.Chown(path, uid, gid); err != nil {
			// Include the offending path for context; the previous no-arg
			// fmt.Sprintf added nothing to the message.
			return errors.Wrap(err, fmt.Sprintf("server/filesystem: chown: failed to chown file %s", path))
		}
		return nil
	})
}
// Chtimes sets the access and modification times on the given path, which is
// interpreted relative to the server's root directory.
func (fs *Filesystem) Chtimes(path string, atime, mtime time.Time) error {
	p := strings.TrimLeft(filepath.Clean(path), "/")
	err := fs.root.Chtimes(p, atime, mtime)
	if err != nil {
		return errors.Wrap(err, "server/filesystem: chtimes: failed to chtimes path")
	}
	return nil
}

View File

@@ -1,61 +1,125 @@
package filesystem
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"context"
"fmt"
"io"
iofs "io/fs"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"sync/atomic"
"time"
"emperror.dev/errors"
"github.com/mholt/archives"
"github.com/pterodactyl/wings/internal"
gzip2 "github.com/klauspost/compress/gzip"
zip2 "github.com/klauspost/compress/zip"
"github.com/mholt/archiver/v4"
)
type extractOptions struct {
dir string
file string
format archives.Format
r io.Reader
}
// CompressFiles compresses all the files matching the given paths in the
// specified directory. This function also supports passing nested paths to only
// compress certain files and folders when working in a larger directory. This
// effectively creates a local backup, but rather than ignoring specific files
// and folders, it takes an allowlist of files and folders.
// and folders, it takes an allow-list of files and folders.
//
// All paths are relative to the dir that is passed in as the first argument,
// and the compressed file will be placed at that location named
// `archive-{date}.tar.gz`.
func (fs *Filesystem) CompressFiles(ctx context.Context, dir string, paths []string) (os.FileInfo, error) {
a, err := NewArchive(fs.root, dir, WithMatching(paths))
func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
cleanedRootDir, err := fs.SafePath(dir)
if err != nil {
return nil, errors.WrapIf(err, "server/filesystem: compress: failed to create archive instance")
}
n := fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", ""))
f, err := fs.root.OpenFile(normalize(filepath.Join(dir, n)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
return nil, errors.Wrap(err, "server/filesystem: compress: failed to open file for writing")
}
defer f.Close()
cw := internal.NewCountedWriter(f)
// todo: eventing on the counted writer so that we can slowly increase the disk
// used value on the server as the file gets written?
if err := a.Stream(ctx, cw); err != nil {
return nil, errors.Wrap(err, "server/filesystem: compress: failed to write to disk")
}
if err := fs.HasSpaceFor(cw.BytesWritten()); err != nil {
_ = fs.root.Remove(normalize(filepath.Join(dir, n)))
return nil, err
}
fs.addDisk(cw.BytesWritten())
return f.Stat()
// Take all the paths passed in and merge them together with the root directory we've gotten.
for i, p := range paths {
paths[i] = filepath.Join(cleanedRootDir, p)
}
cleaned, err := fs.ParallelSafePath(paths)
if err != nil {
return nil, err
}
a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
d := path.Join(
cleanedRootDir,
fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
)
if err := a.Create(context.Background(), d); err != nil {
return nil, err
}
f, err := os.Stat(d)
if err != nil {
_ = os.Remove(d)
return nil, err
}
if err := fs.HasSpaceFor(f.Size()); err != nil {
_ = os.Remove(d)
return nil, err
}
fs.addDisk(f.Size())
return f, nil
}
// SpaceAvailableForDecompression looks through a given archive and determines
// if decompressing it would put the server over its allocated disk space limit.
// Returns a disk-space filesystem error as soon as the projected usage exceeds
// the limit; returns nil if the server has no limit or the archive fits.
func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir string, file string) error {
	// Don't waste time trying to determine this if we know the server will have the space for
	// it since there is no limit.
	if fs.MaxDisk() <= 0 {
		return nil
	}

	source, err := fs.SafePath(filepath.Join(dir, file))
	if err != nil {
		return err
	}

	// Get the cached size in a parallel process so that if it is not cached we are not
	// waiting an unnecessary amount of time on this call.
	dirSize, err := fs.DiskUsage(false)
	// This error was previously discarded (err was overwritten on the next
	// line); surface it so the check isn't performed against a bogus size.
	if err != nil {
		return err
	}

	fsys, err := archiver.FileSystem(source)
	if err != nil {
		if errors.Is(err, archiver.ErrNoMatch) {
			return newFilesystemError(ErrCodeUnknownArchive, err)
		}
		return err
	}

	// Running total of the uncompressed archive size seen so far.
	var size int64
	return iofs.WalkDir(fsys, ".", func(path string, d iofs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			// Stop walking if the context is canceled.
			return ctx.Err()
		default:
			info, err := d.Info()
			if err != nil {
				return err
			}
			// Abort as soon as the projected usage would exceed the limit rather
			// than walking the entire archive.
			if atomic.AddInt64(&size, info.Size())+dirSize > fs.MaxDisk() {
				return newFilesystemError(ErrCodeDiskSpace, nil)
			}
			return nil
		}
	})
}
// DecompressFile will decompress a file in a given directory by using the
@@ -64,137 +128,147 @@ func (fs *Filesystem) CompressFiles(ctx context.Context, dir string, paths []str
// zip-slip attack being attempted by validating that the final path is within
// the server data directory.
func (fs *Filesystem) DecompressFile(ctx context.Context, dir string, file string) error {
f, err := fs.root.Open(normalize(filepath.Join(dir, file)))
source, err := fs.SafePath(filepath.Join(dir, file))
if err != nil {
return errors.Wrap(err, "server/filesystem: decompress: failed to open file")
return err
}
return fs.DecompressFileUnsafe(ctx, dir, source)
}
// DecompressFileUnsafe will decompress any file on the local disk without checking
// if it is owned by the server. The file will be SAFELY decompressed and extracted
// into the server's directory.
func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file string) error {
// Ensure that the archive actually exists on the system.
if _, err := os.Stat(file); err != nil {
return errors.WithStack(err)
}
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
format, input, err := archives.Identify(ctx, filepath.Base(file), f)
// Identify the type of archive we are dealing with.
format, input, err := archiver.Identify(filepath.Base(file), f)
if err != nil {
if errors.Is(err, archives.NoMatch) {
if errors.Is(err, archiver.ErrNoMatch) {
return newFilesystemError(ErrCodeUnknownArchive, err)
}
return errors.Wrap(err, "server/filesystem: decompress: failed to identify archive format")
return err
}
return fs.extractStream(ctx, extractOptions{dir: dir, file: file, format: format, r: input})
}
func (fs *Filesystem) extractStream(ctx context.Context, opts extractOptions) error {
// See if it's a compressed archive, such as TAR or a ZIP
ex, ok := opts.format.(archives.Extractor)
if !ok {
// If not, check if it's a single-file compression, such as
// .log.gz, .sql.gz, and so on
de, ok := opts.format.(archives.Decompressor)
if !ok {
return nil
}
p := filepath.Join(opts.dir, strings.TrimSuffix(opts.file, opts.format.Extension()))
if err := fs.IsIgnored(p); err != nil {
return nil
}
reader, err := de.OpenReader(opts.r)
if err != nil {
return errors.Wrap(err, "server/filesystem: decompress: failed to open reader")
}
defer reader.Close()
// Open the file for creation/writing
f, err := fs.root.OpenFile(normalize(p), os.O_WRONLY|os.O_CREATE, 0o644)
if err != nil {
return errors.Wrap(err, "server/filesystem: decompress: failed to open file")
}
defer f.Close()
// Read in 4 KB chunks
buf := make([]byte, 4096)
for {
n, err := reader.Read(buf)
if n > 0 {
if err := fs.HasSpaceFor(int64(n)); err != nil {
return err
}
if _, err := f.Write(buf[:n]); err != nil {
return errors.Wrap(err, "server/filesystem: decompress: failed to write")
}
fs.addDisk(int64(n))
}
if err != nil {
if err == io.EOF {
break
}
return errors.Wrap(err, "server/filesystem: decompress: failed to read")
}
}
return nil
}
// Decompress and extract archive
return ex.Extract(ctx, opts.r, func(ctx context.Context, f archives.FileInfo) error {
if f.IsDir() {
return nil
}
p := filepath.Join(opts.dir, f.NameInArchive)
if err := fs.IsIgnored(p); err != nil {
return nil
}
r, err := f.Open()
if err != nil {
return err
}
defer r.Close()
if f.Mode()&os.ModeSymlink != 0 {
// Try to create the symlink if it is in the archive, but don't hold up the process
// if the file cannot be created. In that case just skip over it entirely.
if f.LinkTarget != "" {
p2 := strings.TrimLeft(filepath.Clean(p), string(filepath.Separator))
if p2 == "" {
p2 = "."
}
// We don't use [fs.Symlink] here because that normalizes the source directory for
// consistency with the codebase. In this case when decompressing we want to just
// accept the source without any normalization.
if err := fs.root.Symlink(f.LinkTarget, p2); err != nil {
if errors.Is(err, os.ErrNotExist) || IsPathError(err) || IsLinkError(err) {
return nil
}
return errors.Wrap(err, "server/filesystem: decompress: failed to create symlink")
}
}
return nil
}
if err := fs.Write(p, r, f.Size(), f.Mode().Perm()); err != nil {
return errors.Wrap(err, "server/filesystem: decompress: failed to write file")
}
// Update the file modification time to the one set in the archive.
if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
return errors.Wrap(err, "server/filesystem: decompress: failed to update file modification time")
}
return nil
return fs.extractStream(ctx, extractStreamOptions{
Directory: dir,
Format: format,
Reader: input,
})
}
// ExtractStreamUnsafe .
func (fs *Filesystem) ExtractStreamUnsafe(ctx context.Context, dir string, r io.Reader) error {
format, input, err := archives.Identify(ctx, "archive.tar.gz", r)
format, input, err := archiver.Identify("archive.tar.gz", r)
if err != nil {
if errors.Is(err, archives.NoMatch) {
if errors.Is(err, archiver.ErrNoMatch) {
return newFilesystemError(ErrCodeUnknownArchive, err)
}
return err
}
return fs.extractStream(ctx, extractOptions{
dir: dir,
format: format,
r: input,
return fs.extractStream(ctx, extractStreamOptions{
Directory: dir,
Format: format,
Reader: input,
})
}
// extractStreamOptions bundles the inputs needed by extractStream.
type extractStreamOptions struct {
	// Directory is the directory, relative to the server data directory, that
	// the archive contents should be extracted into.
	Directory string
	// FileName is the name of the archive; used only for error message context.
	FileName string
	// Format is the detected archive format that drives the extraction.
	Format archiver.Format
	// Reader supplies the raw archive bytes.
	Reader io.Reader
}
// extractStream extracts every file from the provided archive reader into the
// server's directory. Ignored paths are skipped, and each extracted file has
// its mode and modification time restored from the archive metadata.
func (fs *Filesystem) extractStream(ctx context.Context, opts extractStreamOptions) error {
	// Decompress and extract archive
	if ex, ok := opts.Format.(archiver.Extractor); ok {
		return ex.Extract(ctx, opts.Reader, nil, func(ctx context.Context, f archiver.File) error {
			// Directories are created implicitly when their files are written.
			if f.IsDir() {
				return nil
			}
			p := filepath.Join(opts.Directory, ExtractNameFromArchive(f))
			// If it is ignored, just don't do anything with the file and skip over it.
			if err := fs.IsIgnored(p); err != nil {
				return nil
			}
			r, err := f.Open()
			if err != nil {
				return err
			}
			defer r.Close()
			if err := fs.Writefile(p, r); err != nil {
				return wrapError(err, opts.FileName)
			}
			// Update the file permissions to the one set in the archive.
			if err := fs.Chmod(p, f.Mode()); err != nil {
				return wrapError(err, opts.FileName)
			}
			// Update the file modification time to the one set in the archive.
			if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
				return wrapError(err, opts.FileName)
			}
			return nil
		})
	}
	// NOTE(review): formats that only implement archiver.Decompressor (e.g. a
	// bare .gz file) fall through here and report success without extracting
	// anything — confirm this is intentional.
	return nil
}
// ExtractNameFromArchive looks at an archive file to try and determine the name
// for a given element in an archive. Because of... who knows why, each file type
// uses different methods to determine the file name.
//
// If there is a archiver.File#Sys() value present we will try to use the name
// present in there, otherwise falling back to archiver.File#Name() if all else
// fails. Without this logic present, some archive types such as zip/tars/etc.
// will write all of the files to the base directory, rather than the nested
// directory that is expected.
//
// For files like ".rar" types, there is no f.Sys() value present, and the value
// of archiver.File#Name() will be what you need.
func ExtractNameFromArchive(f archiver.File) string {
	sys := f.Sys()
	// Some archive types won't have a value returned when you call f.Sys() on them,
	// such as ".rar" archives for example. In those cases the only thing you can do
	// is hope that "f.Name()" is actually correct for them.
	if sys == nil {
		return f.Name()
	}
	switch s := sys.(type) {
	case *zip.FileHeader:
		return s.Name
	case *zip2.FileHeader:
		return s.Name
	case *tar.Header:
		return s.Name
	case *gzip.Header:
		return s.Name
	case *gzip2.Header:
		return s.Name
	default:
		// At this point we cannot figure out what type of archive this might be so
		// just try to find the name field in the struct. Only use it when the field
		// actually holds a string: calling String() on a non-string reflect.Value
		// returns a "<T Value>" placeholder rather than a usable name.
		field := reflect.Indirect(reflect.ValueOf(sys)).FieldByName("Name")
		if field.IsValid() && field.Kind() == reflect.String {
			return field.String()
		}
		// Fallback to the basename of the file at this point. There is nothing we can really
		// do to try and figure out what the underlying directory of the file is supposed to
		// be since it didn't implement a name field.
		return f.Name()
	}
}

View File

@@ -0,0 +1,55 @@
package filesystem
import (
"context"
"os"
"sync/atomic"
"testing"
. "github.com/franela/goblin"
)
// Given an archive named test.{ext}, with the following file structure:
//
//	test/
//	|──inside/
//	|────finside.txt
//	|──outside.txt
//
// this test will ensure that it's being decompressed as expected.
func TestFilesystem_DecompressFile(t *testing.T) {
	g := Goblin(t)
	fs, rfs := NewFs()

	g.Describe("Decompress", func() {
		for _, ext := range []string{"zip", "rar", "tar", "tar.gz"} {
			// Shadow the loop variable so each registered closure keeps its own
			// extension (required before Go 1.22 loop-variable semantics).
			ext := ext
			g.It("can decompress a "+ext, func() {
				// copy the file to the new FS
				c, err := os.ReadFile("./testdata/test." + ext)
				g.Assert(err).IsNil()
				err = rfs.CreateServerFile("./test."+ext, c)
				g.Assert(err).IsNil()

				// decompress
				err = fs.DecompressFile(context.Background(), "/", "test."+ext)
				g.Assert(err).IsNil()

				// make sure everything is where it is supposed to be
				_, err = rfs.StatServerFile("test/outside.txt")
				g.Assert(err).IsNil()

				st, err := rfs.StatServerFile("test/inside")
				g.Assert(err).IsNil()
				g.Assert(st.IsDir()).IsTrue()

				// Previously the stat result here was discarded and the stale
				// directory "st" re-asserted; capture it and verify the nested
				// entry is a regular file.
				st, err = rfs.StatServerFile("test/inside/finside.txt")
				g.Assert(err).IsNil()
				g.Assert(st.IsDir()).IsFalse()
			})
		}

		g.AfterEach(func() {
			rfs.reset()
			atomic.StoreInt64(&fs.diskUsed, 0)
			atomic.StoreInt64(&fs.diskLimit, 0)
		})
	})
}

View File

@@ -1,10 +1,6 @@
package filesystem
import (
fs2 "io/fs"
"os"
"path/filepath"
"slices"
"sync"
"sync/atomic"
"syscall"
@@ -12,13 +8,13 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/karrick/godirwalk"
)
type SpaceCheckingOpts struct {
AllowStaleResponse bool
}
// TODO: can this be replaced with some sort of atomic? Like atomic.Pointer?
type usageLookupTime struct {
sync.RWMutex
value time.Time
@@ -39,13 +35,12 @@ func (ult *usageLookupTime) Get() time.Time {
return ult.value
}
// MaxDisk returns the maximum amount of disk space that this Filesystem
// instance is allowed to use.
// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
func (fs *Filesystem) MaxDisk() int64 {
return atomic.LoadInt64(&fs.diskLimit)
}
// SetDiskLimit sets the disk space limit for this Filesystem instance.
// Sets the disk space limit for this Filesystem instance.
func (fs *Filesystem) SetDiskLimit(i int64) {
atomic.SwapInt64(&fs.diskLimit, i)
}
@@ -59,17 +54,18 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
return nil
}
// HasSpaceAvailable checks if the directory a file is trying to be added to has enough
// space available for the file to be written to. Because determining the amount of space
// being used by a server is a taxing operation, we will load it all up into a cache and
// pull from that as long as the key is not expired.
// Determines if the directory a file is trying to be added to has enough space available
// for the file to be written to.
//
// This operation will potentially be blocked unless allowStaleValue is set to true. See
// the documentation on DiskUsage for how this affects the call.
// Because determining the amount of space being used by a server is a taxing operation we
// will load it all up into a cache and pull from that as long as the key is not expired.
//
// This operation will potentially block unless allowStaleValue is set to true. See the
// documentation on DiskUsage for how this affects the call.
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
size, err := fs.DiskUsage(allowStaleValue)
if err != nil {
log.WithField("root", fs.Path()).WithField("error", err).Warn("failed to determine root fs directory size")
log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
}
// If space is -1 or 0 just return true, means they're allowed unlimited.
@@ -118,7 +114,7 @@ func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
// currently performing a lookup, just do the disk usage calculation in the background.
go func(fs *Filesystem) {
if _, err := fs.updateCachedDiskUsage(); err != nil {
log.WithField("root", fs.rootPath).WithField("error", err).Warn("failed to update fs disk usage from within routine")
log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
}
}(fs)
}
@@ -158,55 +154,41 @@ func (fs *Filesystem) updateCachedDiskUsage() (int64, error) {
return size, err
}
// DirectorySize determines the directory size of a given location. Returns the size
// in bytes. This can be a fairly taxing operation on locations with tons of files,
// so it is recommended that you cache the output.
// Determines the directory size of a given location by running parallel tasks to iterate
// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
// on locations with tons of files, so it is recommended that you cache the output.
func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
dir = normalize(dir)
if dir != "." {
if _, err := fs.root.Lstat(dir); err != nil {
return 0, err
}
}
rt := fs.root
if dir != "." {
r, err := fs.root.OpenRoot(dir)
if err != nil {
return 0, errors.Wrap(err, "server/filesystem: directorysize: failed to open root directory")
}
defer r.Close()
rt = r
d, err := fs.SafePath(dir)
if err != nil {
return 0, err
}
var size int64
var links []uint64
var st syscall.Stat_t
err = godirwalk.Walk(d, &godirwalk.Options{
Unsorted: true,
Callback: func(p string, e *godirwalk.Dirent) error {
// If this is a symlink then resolve the final destination of it before trying to continue walking
// over its contents. If it resolves outside the server data directory just skip everything else for
// it. Otherwise, allow it to continue.
if e.IsSymlink() {
if _, err := fs.SafePath(p); err != nil {
if IsErrorCode(err, ErrCodePathResolution) {
return godirwalk.SkipThis
}
return err
}
}
if !e.IsDir() {
_ = syscall.Lstat(p, &st)
atomic.AddInt64(&size, st.Size)
}
err := filepath.WalkDir(rt.Name(), func(path string, d fs2.DirEntry, err error) error {
if !d.Type().IsRegular() {
return nil
}
st, err := d.Info()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return err
}
s := st.Sys().(*syscall.Stat_t)
if s.Nlink > 1 {
// Hard links have the same inode number, don't add them more than once.
if slices.Contains(links, s.Ino) {
return nil
}
links = append(links, s.Ino)
}
size += st.Size()
return nil
},
})
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")

49
server/filesystem/errors.go Executable file → Normal file
View File

@@ -3,6 +3,7 @@ package filesystem
import (
"fmt"
"os"
"path/filepath"
"emperror.dev/errors"
"github.com/apex/log"
@@ -14,6 +15,7 @@ const (
ErrCodeIsDirectory ErrorCode = "E_ISDIR"
ErrCodeDiskSpace ErrorCode = "E_NODISK"
ErrCodeUnknownArchive ErrorCode = "E_UNKNFMT"
ErrCodePathResolution ErrorCode = "E_BADPATH"
ErrCodeDenylistFile ErrorCode = "E_DENYLIST"
ErrCodeUnknownError ErrorCode = "E_UNKNOWN"
ErrNotExist ErrorCode = "E_NOTEXIST"
@@ -61,6 +63,12 @@ func (e *Error) Error() string {
r = "<empty>"
}
return fmt.Sprintf("filesystem: file access prohibited: [%s] is on the denylist", r)
case ErrCodePathResolution:
r := e.resolved
if r == "" {
r = "<empty>"
}
return fmt.Sprintf("filesystem: server path [%s] resolves to a location outside the server root: %s", e.path, r)
case ErrNotExist:
return "filesystem: does not exist"
case ErrCodeUnknownError:
@@ -81,6 +89,30 @@ func (fs *Filesystem) error(err error) *log.Entry {
return log.WithField("subsystem", "filesystem").WithField("root", fs.root).WithField("error", err)
}
// handleWalkerError handles errors encountered when walking through
// directories.
//
// Path resolution errors are swallowed rather than propagated: when the
// offending entry is a directory the walker is told to skip the whole
// subtree (filepath.SkipDir), otherwise nil is returned so the walk
// continues with the remaining entries in the directory. Every other error
// is handed straight back to the caller. This assumes an os.FileInfo was
// even provided; a nil FileInfo is treated like a non-directory.
func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
	if !IsErrorCode(err, ErrCodePathResolution) {
		// Not a path resolution failure; let the caller decide what to do.
		return err
	}
	switch {
	case f != nil && f.IsDir():
		return filepath.SkipDir
	default:
		return nil
	}
}
// IsFilesystemError checks if the given error is one of the Filesystem errors.
func IsFilesystemError(err error) bool {
	if err == nil {
		return false
	}
	var target *Error
	return errors.As(err, &target)
}
// IsErrorCode checks if "err" is a filesystem Error type. If so, it will then
// drop in and check that the error code is the same as the provided ErrorCode
// passed in "code".
@@ -92,12 +124,17 @@ func IsErrorCode(err error, code ErrorCode) bool {
return false
}
func IsPathError(err error) bool {
var pe *os.PathError
return errors.As(err, &pe)
// NewBadPathResolution returns a new path resolution error
// (ErrCodePathResolution) indicating that the requested path resolves to a
// location ("resolved") outside of the server's root data directory. The
// stack is captured one frame up so the error points at the caller rather
// than at this helper.
func NewBadPathResolution(path string, resolved string) error {
	return errors.WithStackDepth(&Error{code: ErrCodePathResolution, path: path, resolved: resolved}, 1)
}
func IsLinkError(err error) bool {
var le *os.LinkError
return errors.As(err, &le)
// wrapError wraps the provided error as a generic Filesystem error
// (ErrCodeUnknownError) and attaches the provided resolved source path to
// it. A nil error, or an error that is already a Filesystem error, is
// returned unchanged.
func wrapError(err error, resolved string) error {
	if err == nil {
		return nil
	}
	if IsFilesystemError(err) {
		return err
	}
	wrapped := &Error{code: ErrCodeUnknownError, err: err, resolved: resolved}
	return errors.WithStackDepth(wrapped, 1)
}

15
server/filesystem/errors_test.go Executable file → Normal file
View File

@@ -39,4 +39,19 @@ func TestFilesystem_PathResolutionError(t *testing.T) {
g.Assert(fserr.Unwrap()).Equal(underlying)
})
})
g.Describe("NewBadPathResolutionError", func() {
g.It("is can detect itself as an error correctly", func() {
err := NewBadPathResolution("foo", "bar")
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
g.Assert(err.Error()).Equal("filesystem: server path [foo] resolves to a location outside the server root: bar")
g.Assert(IsErrorCode(&Error{code: ErrCodeIsDirectory}, ErrCodePathResolution)).IsFalse()
})
g.It("returns <empty> if no destination path is provided", func() {
err := NewBadPathResolution("foo", "")
g.Assert(err).IsNotNil()
g.Assert(err.Error()).Equal("filesystem: server path [foo] resolves to a location outside the server root: <empty>")
})
})
}

527
server/filesystem/filesystem.go Executable file → Normal file
View File

@@ -3,7 +3,7 @@ package filesystem
import (
"bufio"
"io"
fs2 "io/fs"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -11,11 +11,12 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gabriel-vasile/mimetype"
"github.com/karrick/godirwalk"
ignore "github.com/sabhiram/go-gitignore"
"github.com/pterodactyl/wings/config"
@@ -34,61 +35,35 @@ type Filesystem struct {
diskLimit int64
// The root data directory path for this Filesystem instance.
root *os.Root
rootPath string
root string
isTest bool
}
// New creates a new Filesystem instance for a given server.
func New(path string, size int64, denylist []string) (*Filesystem, error) {
r, err := os.OpenRoot(path)
if err != nil {
return nil, errors.Wrap(err, "server/filesystem: failed to open root")
}
fs := &Filesystem{
root: r,
rootPath: path,
func New(root string, size int64, denylist []string) *Filesystem {
return &Filesystem{
root: root,
diskLimit: size,
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
lastLookupTime: &usageLookupTime{},
lookupInProgress: system.NewAtomicBool(false),
denylist: ignore.CompileIgnoreLines(denylist...),
}
return fs, nil
}
// normalize runs the input path through filepath.Clean and strips any
// leading path separators, since the os.Root method calls will fail on
// rooted paths. An empty result maps to ".", which os.Root understands as
// its base directory.
func normalize(path string) string {
	cleaned := filepath.Clean(path)
	cleaned = strings.TrimLeft(cleaned, string(filepath.Separator))
	if len(cleaned) == 0 {
		return "."
	}
	return cleaned
}
// Path returns the root data directory path for this Filesystem instance on
// the host filesystem — the same path the instance's os.Root handle was
// opened against.
func (fs *Filesystem) Path() string {
	return fs.rootPath
}
// Close closes the underlying os.Root instance for the server.
//
// The stray, unreachable "return fs.root" that followed "return nil" has
// been removed: it could never execute and returned *os.Root where an error
// is expected, which does not compile.
func (fs *Filesystem) Close() error {
	if err := fs.root.Close(); err != nil {
		return errors.Wrap(err, "server/filesystem: failed to close root")
	}
	return nil
}
// File returns a reader for a file instance as well as the stat information.
func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
p = normalize(p)
st, err := fs.Stat(p)
cleaned, err := fs.SafePath(p)
if err != nil {
return nil, Stat{}, errors.WithStackIf(err)
}
st, err := fs.Stat(cleaned)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, Stat{}, newFilesystemError(ErrNotExist, err)
@@ -98,7 +73,7 @@ func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
if st.IsDir() {
return nil, Stat{}, newFilesystemError(ErrCodeIsDirectory, nil)
}
f, err := fs.root.Open(p)
f, err := os.Open(cleaned)
if err != nil {
return nil, Stat{}, errors.WithStackIf(err)
}
@@ -106,12 +81,14 @@ func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
}
// Touch acts by creating the given file and path on the disk if it is not present
// already. If it is present, the file is opened using the defaults which will truncate
// already. If it is present, the file is opened using the defaults which will truncate
// the contents. The opened file is then returned to the caller.
func (fs *Filesystem) Touch(p string, flag int, mode os.FileMode) (*os.File, error) {
p = normalize(p)
o := &fileOpener{root: fs.root}
f, err := o.open(p, flag, mode)
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
cleaned, err := fs.SafePath(p)
if err != nil {
return nil, err
}
f, err := os.OpenFile(cleaned, flag, 0o644)
if err == nil {
return f, nil
}
@@ -123,42 +100,45 @@ func (fs *Filesystem) Touch(p string, flag int, mode os.FileMode) (*os.File, err
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file handle")
}
// Only create and chown the directory if it doesn't exist.
if _, err := fs.root.Stat(filepath.Dir(p)); errors.Is(err, os.ErrNotExist) {
if _, err := os.Stat(filepath.Dir(cleaned)); errors.Is(err, os.ErrNotExist) {
// Create the path leading up to the file we're trying to create, setting the final perms
// on it as we go.
if err := fs.root.MkdirAll(filepath.Dir(p), 0o755); err != nil {
return nil, errors.WrapIf(err, "server/filesystem: touch: failed to create directory tree")
if err := os.MkdirAll(filepath.Dir(cleaned), 0o755); err != nil {
return nil, errors.Wrap(err, "server/filesystem: touch: failed to create directory tree")
}
if err := fs.Chown(filepath.Dir(p)); err != nil {
return nil, errors.WrapIf(err, "server/filesystem: touch: failed to chown directory tree")
if err := fs.Chown(filepath.Dir(cleaned)); err != nil {
return nil, err
}
}
o := &fileOpener{}
// Try to open the file now that we have created the pathing necessary for it, and then
// Chown that file so that the permissions don't mess with things.
f, err = o.open(p, flag, mode)
f, err = o.open(cleaned, flag, 0o644)
if err != nil {
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file handle")
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file with wait")
}
_ = fs.Chown(p)
_ = fs.Chown(cleaned)
return f, nil
}
// Writefile writes a file to the system. If the file does not already exist one
// will be created. This will also properly recalculate the disk space used by
// the server when writing new files or modifying existing ones.
//
// deprecated 1.12.1 prefer the use of Filesystem.Write()
func (fs *Filesystem) Writefile(p string, r io.Reader) error {
p = normalize(p)
cleaned, err := fs.SafePath(p)
if err != nil {
return err
}
var currentSize int64
// If the file does not exist on the system already go ahead and create the pathway
// to it and an empty file. We'll then write to it later on after this completes.
stat, err := fs.root.Stat(p)
stat, err := os.Stat(cleaned)
if err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
} else if err == nil {
if stat.IsDir() {
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: stat.Name()})
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: cleaned})
}
currentSize = stat.Size()
}
@@ -173,7 +153,7 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
// Touch the file and return the handle to it at this point. This will create the file,
// any necessary directories, and set the proper owner of the file.
file, err := fs.Touch(p, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
file, err := fs.Touch(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
if err != nil {
return err
}
@@ -185,91 +165,125 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
// Adjust the disk usage to account for the old size and the new size of the file.
fs.addDisk(sz - currentSize)
return fs.Chown(p)
return fs.unsafeChown(cleaned)
}
func (fs *Filesystem) Mkdir(p string, mode os.FileMode) error {
if err := fs.root.Mkdir(normalize(p), mode); err != nil {
return errors.Wrap(err, "server/filesystem: mkdir: failed to make directory")
}
return nil
}
// Write writes a file to the disk.
func (fs *Filesystem) Write(p string, r io.Reader, newSize int64, mode os.FileMode) error {
st, err := fs.root.Stat(normalize(p))
// Creates a new directory (name) at a specified path (p) for the server.
func (fs *Filesystem) CreateDirectory(name string, p string) error {
cleaned, err := fs.SafePath(path.Join(p, name))
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return errors.Wrap(err, "server/filesystem: write: failed to stat file")
}
}
var c int64
if err == nil {
if st.IsDir() {
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: normalize(p)})
}
c = st.Size()
}
if err := fs.HasSpaceFor(newSize - c); err != nil {
return err
}
f, err := fs.Touch(p, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return errors.Wrap(err, "server/filesystem: write: failed to touch file")
}
defer f.Close()
if newSize == 0 {
fs.addDisk(-c)
} else {
// Do not use CopyBuffer here; it is wasteful as the file implements
// io.ReaderFrom, which causes it to not use the buffer anyway.
n, err := io.Copy(f, io.LimitReader(r, newSize))
// Always adjust the disk to account for cases where a partial copy occurs
// and there is some new content on the disk.
fs.addDisk(n - c)
if err != nil {
return errors.Wrap(err, "server/filesystem: write: failed to write file")
}
}
// todo: might be unnecessary due to the `fs.Touch` call already doing this?
return fs.Chown(p)
}
// CreateDirectory creates a new directory ("name") at a specified path ("p") for the server.
func (fs *Filesystem) CreateDirectory(name string, p string) error {
return fs.root.MkdirAll(path.Join(normalize(p), name), 0o755)
return os.MkdirAll(cleaned, 0o755)
}
// Rename moves (or renames) a file or directory.
func (fs *Filesystem) Rename(from string, to string) error {
to = normalize(to)
from = normalize(from)
cleanedFrom, err := fs.SafePath(from)
if err != nil {
return errors.WithStack(err)
}
if from == "." || to == "." {
cleanedTo, err := fs.SafePath(to)
if err != nil {
return errors.WithStack(err)
}
// If the target file or directory already exists the rename function will fail, so just
// bail out now.
if _, err := os.Stat(cleanedTo); err == nil {
return os.ErrExist
}
// If the target file or directory already exists the rename function will
// fail, so just bail out now.
if _, err := fs.root.Stat(to); err == nil {
return os.ErrExist
if cleanedTo == fs.Path() {
return errors.New("attempting to rename into an invalid directory space")
}
d := strings.TrimLeft(filepath.Dir(to), "/")
d := strings.TrimSuffix(cleanedTo, path.Base(cleanedTo))
// Ensure that the directory we're moving into exists correctly on the system. Only do this if
// we're not at the root directory level.
if d != "" {
if err := fs.root.MkdirAll(d, 0o755); err != nil {
return errors.Wrap(err, "server/filesystem: failed to create directory tree")
if d != fs.Path() {
if mkerr := os.MkdirAll(d, 0o755); mkerr != nil {
return errors.WithMessage(mkerr, "failed to create directory structure for file rename")
}
}
return fs.root.Rename(from, to)
if err := os.Rename(cleanedFrom, cleanedTo); err != nil {
return errors.WithStack(err)
}
return nil
}
// Chown recursively sets the configured owner on the given path and, for a
// directory, everything underneath it. The path is first validated to
// ensure it resolves safely within the server's data directory before any
// ownership change is attempted.
func (fs *Filesystem) Chown(path string) error {
	resolved, err := fs.SafePath(path)
	if err != nil {
		return err
	}
	return fs.unsafeChown(resolved)
}
// unsafeChown chowns the given path, without checking if the path is safe.
// This should only be used when the path has already been checked (e.g. it
// came from SafePath). The owner uid/gid are read from the daemon's system
// configuration. When the path is a directory its contents are walked
// recursively and chowned as well; in test mode this is a no-op.
func (fs *Filesystem) unsafeChown(path string) error {
	if fs.isTest {
		return nil
	}
	uid := config.Get().System.User.Uid
	gid := config.Get().System.User.Gid
	// Start by just chowning the initial path that we received.
	if err := os.Chown(path, uid, gid); err != nil {
		return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
	}
	// If this is not a directory we can now return from the function, there is nothing
	// left that we need to do.
	if st, err := os.Stat(path); err != nil || !st.IsDir() {
		return nil
	}
	// If this was a directory, begin walking over its contents recursively and ensure that all
	// of the subfiles and directories get their permissions updated as well.
	err := godirwalk.Walk(path, &godirwalk.Options{
		Unsorted: true,
		Callback: func(p string, e *godirwalk.Dirent) error {
			// Do not attempt to chown a symlink. Go's os.Chown function will affect the symlink
			// so if it points to a location outside the data directory the user would be able to
			// (un)intentionally modify that files permissions.
			if e.IsSymlink() {
				if e.IsDir() {
					return godirwalk.SkipThis
				}
				return nil
			}
			return os.Chown(p, uid, gid)
		},
	})
	// A nil error wraps to nil, so the success path needs no special casing here.
	return errors.Wrap(err, "server/filesystem: chown: failed to chown during walk function")
}
// Chmod changes the mode of the file or directory at the given path, after
// validating that the path resolves safely within the server's data
// directory. The actual permission change is skipped in test mode.
func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
	resolved, err := fs.SafePath(path)
	if err != nil {
		return err
	}
	if fs.isTest {
		return nil
	}
	return os.Chmod(resolved, mode)
}
// Begin looping up to 50 times to try and create a unique copy file name. This will take
@@ -311,8 +325,12 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
// Copies a given file to the same location and appends a suffix to the file to indicate that
// it has been copied.
func (fs *Filesystem) Copy(p string) error {
p = normalize(p)
s, err := fs.root.Stat(p)
cleaned, err := fs.SafePath(p)
if err != nil {
return err
}
s, err := os.Stat(cleaned)
if err != nil {
return err
} else if s.IsDir() || !s.Mode().IsRegular() {
@@ -326,8 +344,8 @@ func (fs *Filesystem) Copy(p string) error {
return err
}
base := filepath.Base(p)
relative := strings.TrimSuffix(strings.TrimPrefix(p, fs.Path()), base)
base := filepath.Base(cleaned)
relative := strings.TrimSuffix(strings.TrimPrefix(cleaned, fs.Path()), base)
extension := filepath.Ext(base)
name := strings.TrimSuffix(base, extension)
@@ -339,7 +357,7 @@ func (fs *Filesystem) Copy(p string) error {
name = strings.TrimSuffix(name, ".tar")
}
source, err := fs.root.Open(p)
source, err := os.Open(cleaned)
if err != nil {
return err
}
@@ -353,134 +371,114 @@ func (fs *Filesystem) Copy(p string) error {
return fs.Writefile(path.Join(relative, n), source)
}
// Symlink creates a symbolic link between the source and target paths.
//
// [os.Root].Symlink allows for the creation of a symlink that targets a file
// outside the root directory. This isn't the end of the world because the
// read is blocked through this system, and within a container it would just
// point to something in the readonly filesystem. There are also valid
// use-cases where a symlink might need to point to a file outside the server
// data directory for a server to operate correctly. Since everything in the
// filesystem runs through os.Root we're protected from accidentally reading
// a sensitive file on the _host_ OS.
func (fs *Filesystem) Symlink(source, target string) error {
	source = normalize(source)
	target = normalize(target)
	// Require the source to exist within the root before creating the link.
	// This is a best-effort guard against stupid mistakes or basic attempts
	// to point a link at a location outside the root directory.
	if _, err := fs.root.Stat(source); err != nil {
		return errors.Wrap(err, "server/filesystem: symlink: failed to stat source")
	}
	// Yes -- the gap between the stat above and this symlink call allows a
	// TOCTOU vulnerability to exist, but the remaining filesystem logic
	// (everything goes through os.Root) detects and blocks reads through
	// symlinks resolving outside the root. Anyone determined enough could
	// also just create such links from the running server process, so the
	// internal FS logic has to handle them regardless.
	if err := fs.root.Symlink(source, target); err != nil {
		return errors.Wrap(err, "server/filesystem: symlink: failed to create symlink")
	}
	return nil
}
// ReadDir returns all the entries contained in the given directory, relative
// to the server's root data directory.
func (fs *Filesystem) ReadDir(p string) ([]fs2.DirEntry, error) {
	rd, ok := fs.root.FS().(fs2.ReadDirFS)
	if !ok {
		return []fs2.DirEntry{}, errors.New("server/filesystem: readdir: could not init root fs")
	}
	entries, err := rd.ReadDir(normalize(p))
	if err != nil {
		return []fs2.DirEntry{}, errors.Wrap(err, "server/filesystem: readdir: failed to read directory")
	}
	return entries, nil
}
// TruncateRootDirectory removes _all_ files and directories from a server's
// data directory and resets the used disk space to zero.
func (fs *Filesystem) TruncateRootDirectory() error {
err := filepath.WalkDir(fs.rootPath, func(path string, d fs2.DirEntry, err error) error {
p := normalize(strings.TrimPrefix(path, fs.rootPath))
if p == "." {
return nil
}
if err := fs.root.RemoveAll(p); err != nil {
return err
}
if d.IsDir() {
return filepath.SkipDir
}
return nil
})
if err != nil {
go func() {
// If there was an error, re-calculate the disk usage right away to account
// for any partially removed files.
_, _ = fs.updateCachedDiskUsage()
}()
return errors.Wrap(err, "server/filesystem: truncate: failed to walk root directory")
if err := os.RemoveAll(fs.Path()); err != nil {
return err
}
// Set the disk space back to zero.
fs.addDisk(fs.diskUsed * -1)
if err := os.Mkdir(fs.Path(), 0o755); err != nil {
return err
}
atomic.StoreInt64(&fs.diskUsed, 0)
return nil
}
// Delete removes a file or folder from the system. Prevents the user from
// accidentally (or maliciously) removing their root server data directory.
func (fs *Filesystem) Delete(p string) error {
p = normalize(p)
if p == "." {
return errors.New("server/filesystem: delete: cannot delete root directory")
// This is one of the few (only?) places in the codebase where we're explicitly not using
// the SafePath functionality when working with user provided input. If we did, you would
// not be able to delete a file that is a symlink pointing to a location outside the data
// directory.
//
// We also want to avoid resolving a symlink that points _within_ the data directory and thus
// deleting the actual source file for the symlink rather than the symlink itself. For these
// purposes just resolve the actual file path using filepath.Join() and confirm that the path
// exists within the data directory.
resolved := fs.unsafeFilePath(p)
if !fs.unsafeIsInDataDirectory(resolved) {
return NewBadPathResolution(p, resolved)
}
st, err := fs.root.Lstat(p)
// Block any whoopsies.
if resolved == fs.Path() {
return errors.New("cannot delete root server directory")
}
st, err := os.Lstat(resolved)
if err != nil {
if os.IsNotExist(err) {
return nil
if !os.IsNotExist(err) {
fs.error(err).Warn("error while attempting to stat file before deletion")
return err
}
// The following logic is used to handle a case where a user attempts to
// delete a file that does not exist through a directory symlink.
// We don't want to reveal that the file does not exist, so we validate
// the path of the symlink and return a bad path error if it is invalid.
// The requested file or directory doesn't exist, so at this point we
// need to iterate up the path chain until we hit a directory that
// _does_ exist and can be validated.
parts := strings.Split(filepath.Dir(resolved), "/")
// Range over all the path parts and form directory paths from the end
// moving up until we have a valid resolution, or we run out of paths to
// try.
for k := range parts {
try := strings.Join(parts[:(len(parts)-k)], "/")
if !fs.unsafeIsInDataDirectory(try) {
break
}
t, err := filepath.EvalSymlinks(try)
if err == nil {
if !fs.unsafeIsInDataDirectory(t) {
return NewBadPathResolution(p, t)
}
break
}
}
// Always return early if the file does not exist.
return nil
}
// If the file is not a symlink, we need to check that it is not within a
// symlinked directory that points outside the data directory.
if st.Mode()&os.ModeSymlink == 0 {
ep, err := filepath.EvalSymlinks(resolved)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else if !fs.unsafeIsInDataDirectory(ep) {
return NewBadPathResolution(p, ep)
}
return errors.Wrap(err, "server/filesystem: delete: failed to stat file")
}
if st.IsDir() {
if s, err := fs.DirectorySize(p); err == nil {
if s, err := fs.DirectorySize(resolved); err == nil {
fs.addDisk(-s)
}
} else {
fs.addDisk(-st.Size())
}
return fs.root.RemoveAll(p)
return os.RemoveAll(resolved)
}
// fileOpener opens files relative to an os.Root, retrying with a backoff
// when the open fails because the file is busy.
type fileOpener struct {
	// busy tracks how many open attempts have failed so far — presumably used
	// to cap the retry loop; confirm against the open() implementation.
	busy uint
	// root is the os.Root handle that opens are performed relative to.
	root *os.Root
}
// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
// cannot be opened because of a "text file busy" error, we will attempt until the number of attempts
// has been exhaused, at which point we will abort with an error.
func (fo *fileOpener) open(path string, flags int, mode os.FileMode) (*os.File, error) {
func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File, error) {
for {
f, err := fo.root.OpenFile(path, flags, mode)
f, err := os.OpenFile(path, flags, perm)
// If there is an error because the text file is busy, go ahead and sleep for a few
// hundred milliseconds and then try again up to three times before just returning the
@@ -497,14 +495,17 @@ func (fo *fileOpener) open(path string, flags int, mode os.FileMode) (*os.File,
}
}
// ListDirectory lists the contents of a given directory and returns stat information
// about each file and folder within it. If you only need to know the contents of the
// directory and do not need mimetype information, call [Filesystem.ReadDir] directly
// instead.
// ListDirectory lists the contents of a given directory and returns stat
// information about each file and folder within it.
func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
files, err := fs.ReadDir(p)
cleaned, err := fs.SafePath(p)
if err != nil {
return []Stat{}, err
return nil, err
}
files, err := ioutil.ReadDir(cleaned)
if err != nil {
return nil, err
}
var wg sync.WaitGroup
@@ -514,46 +515,39 @@ func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
// break the panel badly.
out := make([]Stat, len(files))
// Iterate over all the files and directories returned and perform an async process
// Iterate over all of the files and directories returned and perform an async process
// to get the mime-type for them all.
for i, file := range files {
wg.Add(1)
go func(idx int, d fs2.DirEntry) {
go func(idx int, f os.FileInfo) {
defer wg.Done()
fi, err := d.Info()
if err != nil {
log.WithField("error", err).WithField("path", filepath.Join(p, d.Name())).Warn("failed to retrieve directory entry info")
return
}
var m *mimetype.MIME
d := "inode/directory"
if !f.IsDir() {
cleanedp := filepath.Join(cleaned, f.Name())
if f.Mode()&os.ModeSymlink != 0 {
cleanedp, _ = fs.SafePath(filepath.Join(cleaned, f.Name()))
}
if fi.IsDir() {
out[idx] = Stat{FileInfo: fi, Mimetype: "inode/directory"}
return
}
st := Stat{FileInfo: fi, Mimetype: "application/octet-stream"}
// Don't try to detect the type on a pipe — this will just hang the application,
// and you'll never get a response back.
//
// @see https://github.com/pterodactyl/panel/issues/4059
if fi.Mode()&os.ModeNamedPipe == 0 {
if f, err := fs.root.Open(normalize(filepath.Join(p, d.Name()))); err != nil {
if !IsPathError(err) && !IsLinkError(err) {
log.WithField("error", err).WithField("path", filepath.Join(p, d.Name())).Warn("error opening file for mimetype detection")
}
// Don't try to detect the type on a pipe — this will just hang the application and
// you'll never get a response back.
//
// @see https://github.com/pterodactyl/panel/issues/4059
if cleanedp != "" && f.Mode()&os.ModeNamedPipe == 0 {
m, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
} else {
if m, err := mimetype.DetectReader(f); err == nil {
st.Mimetype = m.String()
} else {
log.WithField("error", err).WithField("path", filepath.Join(p, d.Name())).Warn("failed to detect mimetype for file")
}
_ = f.Close()
// Just pass this for an unknown type because the file could not safely be resolved within
// the server data path.
d = "application/octet-stream"
}
}
st := Stat{FileInfo: f, Mimetype: d}
if m != nil {
st.Mimetype = m.String()
}
out[idx] = st
}(i, file)
}
@@ -577,3 +571,20 @@ func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
return out, nil
}
// Chtimes updates the access and modification times of the file at the
// given path, after validating that the path resolves safely within the
// server's data directory. The time change is skipped in test mode.
func (fs *Filesystem) Chtimes(path string, atime, mtime time.Time) error {
	resolved, err := fs.SafePath(path)
	if err != nil {
		return err
	}
	if fs.isTest {
		return nil
	}
	return os.Chtimes(resolved, atime, mtime)
}

377
server/filesystem/filesystem_test.go Executable file → Normal file
View File

@@ -7,7 +7,6 @@ import (
"math/rand"
"os"
"path/filepath"
"strings"
"sync/atomic"
"testing"
"unicode/utf8"
@@ -17,12 +16,7 @@ import (
"github.com/pterodactyl/wings/config"
)
type testFs struct {
*Filesystem
tmpDir string
}
func NewFs() *testFs {
func NewFs() (*Filesystem, *rootFs) {
config.Set(&config.Configuration{
AuthenticationToken: "abc",
System: config.SystemConfiguration{
@@ -35,17 +29,20 @@ func NewFs() *testFs {
if err != nil {
panic(err)
}
// defer os.RemoveAll(tmpDir)
fs, err := New(tmpDir, 0, []string{})
if err != nil {
panic(err)
}
rfs := rootFs{root: tmpDir}
rfs.reset()
fs := New(filepath.Join(tmpDir, "/server"), 0, []string{})
fs.isTest = true
tfs := &testFs{Filesystem: fs, tmpDir: tmpDir}
tfs.reset()
return fs, &rfs
}
return tfs
// rootFs is a test helper wrapping the temporary directory that acts as the
// host root for a fake server data directory ("<root>/server").
type rootFs struct {
	// root is the temporary directory that contains the "server" data directory.
	root string
}
func getFileContent(file *os.File) string {
@@ -56,53 +53,40 @@ func getFileContent(file *os.File) string {
return w.String()
}
func (tfs *testFs) reset() {
if err := tfs.root.Close(); err != nil {
panic(err)
}
if !strings.HasPrefix(tfs.tmpDir, "/tmp/pterodactyl") {
panic("filesystem_test: attempting to delete outside root directory: " + tfs.tmpDir)
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
f, err := os.Create(filepath.Join(rfs.root, "/server", p))
if err == nil {
f.Write(c)
f.Close()
}
if err := os.RemoveAll(tfs.tmpDir); err != nil {
return err
}
func (rfs *rootFs) CreateServerFileFromString(p string, c string) error {
return rfs.CreateServerFile(p, []byte(c))
}
func (rfs *rootFs) StatServerFile(p string) (os.FileInfo, error) {
return os.Stat(filepath.Join(rfs.root, "/server", p))
}
func (rfs *rootFs) reset() {
if err := os.RemoveAll(filepath.Join(rfs.root, "/server")); err != nil {
if !os.IsNotExist(err) {
panic(err)
}
}
if !strings.HasPrefix(tfs.rootPath, tfs.tmpDir) {
panic("filesystem_test: mismatch between root and tmp paths")
}
tfs.rootPath = filepath.Join(tfs.tmpDir, "/server")
if err := os.MkdirAll(tfs.rootPath, 0o755); err != nil {
panic(err)
}
r, err := os.OpenRoot(tfs.rootPath)
if err != nil {
panic(err)
}
tfs.root = r
}
func (tfs *testFs) write(name string, data []byte) {
p := filepath.Clean(filepath.Join(tfs.rootPath, name))
// Ensure we're always writing into a location that would also be cleaned up
// by the reset() function.
if !strings.HasPrefix(p, filepath.Dir(tfs.rootPath)) {
panic("filesystem_test: attempting to write outside of root directory: " + p)
}
if err := os.WriteFile(filepath.Join(tfs.rootPath, name), data, 0o644); err != nil {
if err := os.Mkdir(filepath.Join(rfs.root, "/server"), 0o755); err != nil {
panic(err)
}
}
func TestFilesystem_Openfile(t *testing.T) {
g := Goblin(t)
fs := NewFs()
fs, rfs := NewFs()
g.Describe("File", func() {
g.It("returns custom error when file does not exist", func() {
@@ -113,7 +97,7 @@ func TestFilesystem_Openfile(t *testing.T) {
})
g.It("returns file stat information", func() {
fs.write("foo.txt", []byte("hello world"))
_ = rfs.CreateServerFile("foo.txt", []byte("hello world"))
f, st, err := fs.File("foo.txt")
g.Assert(err).IsNil()
@@ -124,14 +108,14 @@ func TestFilesystem_Openfile(t *testing.T) {
})
g.AfterEach(func() {
fs.reset()
rfs.reset()
})
})
}
func TestFilesystem_Writefile(t *testing.T) {
g := Goblin(t)
fs := NewFs()
fs, rfs := NewFs()
g.Describe("Open and WriteFile", func() {
buf := &bytes.Buffer{}
@@ -180,13 +164,9 @@ func TestFilesystem_Writefile(t *testing.T) {
g.It("cannot create a file outside the root directory", func() {
r := bytes.NewReader([]byte("test file content"))
err := fs.Writefile("../../etc/test.txt", r)
err := fs.Writefile("/some/../foo/../../test.txt", r)
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
err = fs.Writefile("a/../../../test.txt", r)
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("cannot write a file that exceeds the disk limits", func() {
@@ -220,7 +200,7 @@ func TestFilesystem_Writefile(t *testing.T) {
g.AfterEach(func() {
buf.Truncate(0)
fs.reset()
rfs.reset()
atomic.StoreInt64(&fs.diskUsed, 0)
atomic.StoreInt64(&fs.diskLimit, 0)
@@ -230,14 +210,14 @@ func TestFilesystem_Writefile(t *testing.T) {
func TestFilesystem_CreateDirectory(t *testing.T) {
g := Goblin(t)
fs := NewFs()
fs, rfs := NewFs()
g.Describe("CreateDirectory", func() {
g.It("should create missing directories automatically", func() {
err := fs.CreateDirectory("test", "foo/bar/baz")
g.Assert(err).IsNil()
st, err := os.Stat(filepath.Join(fs.rootPath, "foo/bar/baz/test"))
st, err := rfs.StatServerFile("foo/bar/baz/test")
g.Assert(err).IsNil()
g.Assert(st.IsDir()).IsTrue()
g.Assert(st.Name()).Equal("test")
@@ -247,7 +227,7 @@ func TestFilesystem_CreateDirectory(t *testing.T) {
err := fs.CreateDirectory("test", "/foozie/barzie/bazzy/")
g.Assert(err).IsNil()
st, err := os.Stat(filepath.Join(fs.rootPath, "foozie/barzie/bazzy/test"))
st, err := rfs.StatServerFile("foozie/barzie/bazzy/test")
g.Assert(err).IsNil()
g.Assert(st.IsDir()).IsTrue()
g.Assert(st.Name()).Equal("test")
@@ -256,7 +236,7 @@ func TestFilesystem_CreateDirectory(t *testing.T) {
g.It("should not allow the creation of directories outside the root", func() {
err := fs.CreateDirectory("test", "e/../../something")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("should not increment the disk usage", func() {
@@ -266,24 +246,27 @@ func TestFilesystem_CreateDirectory(t *testing.T) {
})
g.AfterEach(func() {
fs.reset()
rfs.reset()
})
})
}
func TestFilesystem_Rename(t *testing.T) {
g := Goblin(t)
fs := NewFs()
fs, rfs := NewFs()
g.Describe("Rename", func() {
g.BeforeEach(func() {
fs.write("source.txt", []byte("text content"))
if err := rfs.CreateServerFileFromString("source.txt", "text content"); err != nil {
panic(err)
}
})
g.It("returns an error if the target already exists", func() {
fs.write("target.txt", []byte("taget content"))
err := rfs.CreateServerFileFromString("target.txt", "taget content")
g.Assert(err).IsNil()
err := fs.Rename("source.txt", "target.txt")
err = fs.Rename("source.txt", "target.txt")
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
})
@@ -295,7 +278,7 @@ func TestFilesystem_Rename(t *testing.T) {
})
g.It("returns an error if the source destination is the root directory", func() {
err := fs.Rename("/", "destination.txt")
err := fs.Rename("source.txt", "/")
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
})
@@ -303,42 +286,43 @@ func TestFilesystem_Rename(t *testing.T) {
g.It("does not allow renaming to a location outside the root", func() {
err := fs.Rename("source.txt", "../target.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("does not allow renaming from a location outside the root", func() {
err := fs.Rename("../ext-source.txt", "target.txt")
err := rfs.CreateServerFileFromString("/../ext-source.txt", "taget content")
err = fs.Rename("/../ext-source.txt", "target.txt")
g.Assert(err).IsNotNil()
g.Assert(IsLinkError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("allows a file to be renamed", func() {
err := fs.Rename("source.txt", "target.txt")
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "source.txt"))
_, err = rfs.StatServerFile("source.txt")
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
st, err := os.Stat(filepath.Join(fs.rootPath, "target.txt"))
st, err := rfs.StatServerFile("target.txt")
g.Assert(err).IsNil()
g.Assert(st.Name()).Equal("target.txt")
g.Assert(st.Size()).IsNotZero()
})
g.It("allows a folder to be renamed", func() {
if err := os.Mkdir(filepath.Join(fs.rootPath, "/source_dir"), 0o755); err != nil {
panic(err)
}
err := fs.Rename("source_dir", "target_dir")
err := os.Mkdir(filepath.Join(rfs.root, "/server/source_dir"), 0o755)
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "source_dir"))
err = fs.Rename("source_dir", "target_dir")
g.Assert(err).IsNil()
_, err = rfs.StatServerFile("source_dir")
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
st, err := os.Stat(filepath.Join(fs.rootPath, "target_dir"))
st, err := rfs.StatServerFile("target_dir")
g.Assert(err).IsNil()
g.Assert(st.IsDir()).IsTrue()
})
@@ -353,24 +337,27 @@ func TestFilesystem_Rename(t *testing.T) {
err := fs.Rename("source.txt", "nested/folder/target.txt")
g.Assert(err).IsNil()
st, err := os.Stat(filepath.Join(fs.rootPath, "nested/folder/target.txt"))
st, err := rfs.StatServerFile("nested/folder/target.txt")
g.Assert(err).IsNil()
g.Assert(st.Name()).Equal("target.txt")
})
g.AfterEach(func() {
fs.reset()
rfs.reset()
})
})
}
func TestFilesystem_Copy(t *testing.T) {
g := Goblin(t)
fs := NewFs()
fs, rfs := NewFs()
g.Describe("Copy", func() {
g.BeforeEach(func() {
fs.write("source.txt", []byte("text content"))
if err := rfs.CreateServerFileFromString("source.txt", "text content"); err != nil {
panic(err)
}
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")))
})
@@ -381,23 +368,31 @@ func TestFilesystem_Copy(t *testing.T) {
})
g.It("should return an error if the source is outside the root", func() {
err := fs.Copy("../ext-source.txt")
err := rfs.CreateServerFileFromString("/../ext-source.txt", "text content")
err = fs.Copy("../ext-source.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("should return an error if the source directory is outside the root", func() {
err := fs.Copy("../nested/in/dir/ext-source.txt")
err := os.MkdirAll(filepath.Join(rfs.root, "/nested/in/dir"), 0o755)
g.Assert(err).IsNil()
err = rfs.CreateServerFileFromString("/../nested/in/dir/ext-source.txt", "external content")
g.Assert(err).IsNil()
err = fs.Copy("../nested/in/dir/ext-source.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
err = fs.Copy("nested/in/../../../nested/in/dir/ext-source.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("should return an error if the source is a directory", func() {
err := os.Mkdir(filepath.Join(fs.rootPath, "/dir"), 0o755)
err := os.Mkdir(filepath.Join(rfs.root, "/server/dir"), 0o755)
g.Assert(err).IsNil()
err = fs.Copy("dir")
@@ -417,10 +412,10 @@ func TestFilesystem_Copy(t *testing.T) {
err := fs.Copy("source.txt")
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "source.txt"))
_, err = rfs.StatServerFile("source.txt")
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "source copy.txt"))
_, err = rfs.StatServerFile("source copy.txt")
g.Assert(err).IsNil()
})
@@ -434,7 +429,7 @@ func TestFilesystem_Copy(t *testing.T) {
r := []string{"source.txt", "source copy.txt", "source copy 1.txt"}
for _, name := range r {
_, err = os.Stat(filepath.Join(fs.rootPath, name))
_, err = rfs.StatServerFile(name)
g.Assert(err).IsNil()
}
@@ -442,24 +437,24 @@ func TestFilesystem_Copy(t *testing.T) {
})
g.It("should create a copy inside of a directory", func() {
if err := os.MkdirAll(filepath.Join(fs.rootPath, "/nested/in/dir"), 0o755); err != nil {
panic(err)
}
fs.write("nested/in/dir/source.txt", []byte("test content"))
err := fs.Copy("nested/in/dir/source.txt")
err := os.MkdirAll(filepath.Join(rfs.root, "/server/nested/in/dir"), 0o755)
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "nested/in/dir/source.txt"))
err = rfs.CreateServerFileFromString("nested/in/dir/source.txt", "test content")
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "nested/in/dir/source copy.txt"))
err = fs.Copy("nested/in/dir/source.txt")
g.Assert(err).IsNil()
_, err = rfs.StatServerFile("nested/in/dir/source.txt")
g.Assert(err).IsNil()
_, err = rfs.StatServerFile("nested/in/dir/source copy.txt")
g.Assert(err).IsNil()
})
g.AfterEach(func() {
fs.reset()
rfs.reset()
atomic.StoreInt64(&fs.diskUsed, 0)
atomic.StoreInt64(&fs.diskLimit, 0)
@@ -467,115 +462,38 @@ func TestFilesystem_Copy(t *testing.T) {
})
}
func TestFilesystem_Symlink(t *testing.T) {
g := Goblin(t)
fs := NewFs()
g.Describe("Symlink", func() {
g.It("should create a symlink", func() {
fs.write("source.txt", []byte("text content"))
err := fs.Symlink("source.txt", "symlink.txt")
g.Assert(err).IsNil()
st, err := os.Lstat(filepath.Join(fs.rootPath, "symlink.txt"))
g.Assert(err).IsNil()
g.Assert(st.Mode()&os.ModeSymlink != 0).IsTrue()
})
g.It("should return an error if the source is outside the root", func() {
err := fs.Symlink("../source.txt", "symlink.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
})
g.It("should return an error if the dest is outside the root", func() {
fs.write("source.txt", []byte("text content"))
err := fs.Symlink("source.txt", "../symlink.txt")
g.Assert(err).IsNotNil()
g.Assert(IsLinkError(err)).IsTrue()
})
g.AfterEach(func() {
fs.reset()
})
})
}
func TestFilesystem_ReadDir(t *testing.T) {
g := Goblin(t)
fs := NewFs()
g.Describe("ReadDir", func() {
g.Before(func() {
if err := os.Mkdir(filepath.Join(fs.rootPath, "child"), 0o755); err != nil {
panic(err)
}
fs.write("one.txt", []byte("one"))
fs.write("two.txt", []byte("two"))
fs.write("child/three.txt", []byte("two"))
})
g.After(func() {
fs.reset()
})
g.It("should return the contents of the root directory", func() {
d, err := fs.ReadDir("/")
g.Assert(err).IsNil()
g.Assert(len(d)).Equal(3)
// os.Root#ReadDir sorts them by name.
g.Assert(d[0].Name()).Equal("child")
g.Assert(d[0].IsDir()).IsTrue()
g.Assert(d[1].Name()).Equal("one.txt")
g.Assert(d[2].Name()).Equal("two.txt")
})
g.It("should return the contents of a child directory", func() {
d, err := fs.ReadDir("child")
g.Assert(err).IsNil()
g.Assert(len(d)).Equal(1)
g.Assert(d[0].Name()).Equal("three.txt")
})
g.It("should return an error if the directory is outside the root", func() {
_, err := fs.ReadDir("../server")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
})
})
}
func TestFilesystem_Delete(t *testing.T) {
g := Goblin(t)
fs := NewFs()
fs, rfs := NewFs()
g.Describe("Delete", func() {
g.BeforeEach(func() {
fs.write("source.txt", []byte("text content"))
if err := rfs.CreateServerFileFromString("source.txt", "test content"); err != nil {
panic(err)
}
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")))
})
g.It("does not delete files outside the root directory", func() {
err := fs.Delete("../ext-source.txt")
err := rfs.CreateServerFileFromString("/../ext-source.txt", "external content")
err = fs.Delete("../ext-source.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("does not allow the deletion of the root directory", func() {
err := fs.Delete("/")
g.Assert(err).IsNotNil()
g.Assert(err.Error()).Equal("server/filesystem: delete: cannot delete root directory")
g.Assert(err.Error()).Equal("cannot delete root server directory")
})
g.It("does not return an error if the target does not exist", func() {
err := fs.Delete("missing.txt")
g.Assert(err).IsNil()
st, err := os.Stat(filepath.Join(fs.rootPath, "source.txt"))
st, err := rfs.StatServerFile("source.txt")
g.Assert(err).IsNil()
g.Assert(st.Name()).Equal("source.txt")
})
@@ -584,7 +502,7 @@ func TestFilesystem_Delete(t *testing.T) {
err := fs.Delete("source.txt")
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "source.txt"))
_, err = rfs.StatServerFile("source.txt")
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
@@ -598,106 +516,103 @@ func TestFilesystem_Delete(t *testing.T) {
"foo/bar/baz/source.txt",
}
if err := os.MkdirAll(filepath.Join(fs.rootPath, "/foo/bar/baz"), 0o755); err != nil {
panic(err)
}
err := os.MkdirAll(filepath.Join(rfs.root, "/server/foo/bar/baz"), 0o755)
g.Assert(err).IsNil()
for _, s := range sources {
fs.write(s, []byte("test content"))
err = rfs.CreateServerFileFromString(s, "test content")
g.Assert(err).IsNil()
}
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")*3))
err := fs.Delete("foo")
err = fs.Delete("foo")
g.Assert(err).IsNil()
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
for _, s := range sources {
_, err = os.Stat(filepath.Join(fs.rootPath, s))
_, err = rfs.StatServerFile(s)
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
}
})
g.It("deletes a symlink but not it's target within the root directory", func() {
// Symlink to a file inside the root server data directory.
if err := os.Symlink(filepath.Join(fs.rootPath, "source.txt"), filepath.Join(fs.rootPath, "symlink.txt")); err != nil {
panic(err)
}
// Symlink to a file inside the root directory.
err := os.Symlink(filepath.Join(rfs.root, "server/source.txt"), filepath.Join(rfs.root, "server/symlink.txt"))
g.Assert(err).IsNil()
// Delete the symlink itself.
err := fs.Delete("symlink.txt")
err = fs.Delete("symlink.txt")
g.Assert(err).IsNil()
// Ensure the symlink was deleted.
_, err = os.Lstat(filepath.Join(fs.rootPath, "symlink.txt"))
_, err = os.Lstat(filepath.Join(rfs.root, "server/symlink.txt"))
g.Assert(err).IsNotNil()
// Ensure the symlink target still exists.
_, err = os.Lstat(filepath.Join(fs.rootPath, "source.txt"))
_, err = os.Lstat(filepath.Join(rfs.root, "server/source.txt"))
g.Assert(err).IsNil()
})
g.It("does not delete files symlinked outside of the root directory", func() {
// Create a file outside the root directory.
fs.write("../external.txt", []byte("test content"))
err := rfs.CreateServerFileFromString("/../source.txt", "test content")
g.Assert(err).IsNil()
// Create a symlink to the file outside the root directory.
if err := os.Symlink(filepath.Join(fs.rootPath, "../external.txt"), filepath.Join(fs.rootPath, "symlink.txt")); err != nil {
panic(err)
}
err = os.Symlink(filepath.Join(rfs.root, "source.txt"), filepath.Join(rfs.root, "/server/symlink.txt"))
g.Assert(err).IsNil()
// Delete the symlink. (This should pass as we will delete the symlink itself, not the target)
err := fs.Delete("symlink.txt")
// Delete the symlink. (This should pass as we will delete the symlink itself, not it's target)
err = fs.Delete("symlink.txt")
g.Assert(err).IsNil()
// Ensure the file outside the root directory still exists.
_, err = os.Lstat(filepath.Join(fs.rootPath, "../external.txt"))
_, err = os.Lstat(filepath.Join(rfs.root, "source.txt"))
g.Assert(err).IsNil()
})
g.It("does not delete files symlinked through a directory outside of the root directory", func() {
// Create a directory outside the root directory.
if err := os.Mkdir(filepath.Join(fs.rootPath, "../external"), 0o755); err != nil {
panic(err)
}
err := os.Mkdir(filepath.Join(rfs.root, "foo"), 0o755)
g.Assert(err).IsNil()
fs.write("../external/source.txt", []byte("test content"))
// Create a file inside the directory that is outside the root.
err = rfs.CreateServerFileFromString("/../foo/source.txt", "test content")
g.Assert(err).IsNil()
// Symlink the directory that is outside the root to a file inside the root.
if err := os.Symlink(filepath.Join(fs.rootPath, "../external"), filepath.Join(fs.rootPath, "/symlink")); err != nil {
panic(err)
}
err = os.Symlink(filepath.Join(rfs.root, "foo"), filepath.Join(rfs.root, "server/symlink"))
g.Assert(err).IsNil()
// Delete a file inside the symlinked directory.
err := fs.Delete("symlink/source.txt")
err = fs.Delete("symlink/source.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
// Ensure the file outside the root directory still exists.
_, err = os.Lstat(filepath.Join(fs.rootPath, "../external/source.txt"))
_, err = os.Lstat(filepath.Join(rfs.root, "foo/source.txt"))
g.Assert(err).IsNil()
})
g.It("returns an error when trying to delete a non-existent file symlinked through a directory outside of the root directory", func() {
// Create a directory outside the root directory.
if err := os.Mkdir(filepath.Join(fs.rootPath, "../external"), 0o755); err != nil {
panic(err)
}
err := os.Mkdir(filepath.Join(rfs.root, "foo2"), 0o755)
g.Assert(err).IsNil()
// Symlink the directory that is outside the root to a file inside the root.
if err := os.Symlink(filepath.Join(fs.rootPath, "../external"), filepath.Join(fs.rootPath, "/symlink")); err != nil {
panic(err)
}
err = os.Symlink(filepath.Join(rfs.root, "foo2"), filepath.Join(rfs.root, "server/symlink"))
g.Assert(err).IsNil()
// Delete a file inside the symlinked directory.
err := fs.Delete("symlink/source.txt")
err = fs.Delete("symlink/source.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.AfterEach(func() {
fs.reset()
rfs.reset()
atomic.StoreInt64(&fs.diskUsed, 0)
atomic.StoreInt64(&fs.diskLimit, 0)

View File

@@ -1,16 +1,134 @@
package filesystem
import (
"context"
iofs "io/fs"
"os"
"path/filepath"
"strings"
"sync"
"emperror.dev/errors"
"golang.org/x/sync/errgroup"
)
// IsIgnored checks if the given file or path is in the server's file denylist. If so, an Error
// Checks if the given file or path is in the server's file denylist. If so, an Error
// is returned, otherwise nil is returned.
func (fs *Filesystem) IsIgnored(paths ...string) error {
for _, p := range paths {
if fs.denylist.MatchesPath(p) {
return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p})
sp, err := fs.SafePath(p)
if err != nil {
return err
}
if fs.denylist.MatchesPath(sp) {
return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: sp})
}
}
return nil
}
// Normalizes a directory being passed in to ensure the user is not able to escape
// from their data directory. After normalization if the directory is still within their home
// path it is returned. If they managed to "escape" an error will be returned.
//
// This logic is actually copied over from the SFTP server code. Ideally that eventually
// either gets ported into this application, or is able to make use of this package.
func (fs *Filesystem) SafePath(p string) (string, error) {
// Start with a cleaned up path before checking the more complex bits.
r := fs.unsafeFilePath(p)
// At the same time, evaluate the symlink status and determine where this file or folder
// is truly pointing to.
ep, err := filepath.EvalSymlinks(r)
if err != nil && !os.IsNotExist(err) {
return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
} else if os.IsNotExist(err) {
// The target of one of the symlinks (EvalSymlinks is recursive) does not exist.
// So we get what target path does not exist and check if it's within the data
// directory. If it is, we return the original path, otherwise we return an error.
pErr, ok := err.(*iofs.PathError)
if !ok {
return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
}
ep = pErr.Path
}
// If the requested directory from EvalSymlinks begins with the server root directory go
// ahead and return it. If not we'll return an error which will block any further action
// on the file.
if fs.unsafeIsInDataDirectory(ep) {
// Returning the original path here instead of the resolved path ensures that
// whatever the user is trying to do will work as expected. If we returned the
// resolved path, the user would be unable to know that it is in fact a symlink.
return r, nil
}
return "", NewBadPathResolution(p, r)
}
// Generate a path to the file by cleaning it up and appending the root server path to it. This
// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
// the fs.unsafeIsInDataDirectory(p) function to confirm.
func (fs *Filesystem) unsafeFilePath(p string) string {
// Calling filepath.Clean on the joined directory will resolve it to the absolute path,
// removing any ../ type of resolution arguments, and leaving us with a direct path link.
//
// This will also trim the existing root path off the beginning of the path passed to
// the function since that can get a bit messy.
return filepath.Clean(filepath.Join(fs.Path(), strings.TrimPrefix(p, fs.Path())))
}
// Check that that path string starts with the server data directory path. This function DOES NOT
// validate that the rest of the path does not end up resolving out of this directory, or that the
// targeted file or folder is not a symlink doing the same thing.
func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool {
return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/")
}
// Executes the fs.SafePath function in parallel against an array of paths. If any of the calls
// fails an error will be returned.
func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
var cleaned []string
// Simple locker function to avoid racy appends to the array of cleaned paths.
m := new(sync.Mutex)
push := func(c string) {
m.Lock()
cleaned = append(cleaned, c)
m.Unlock()
}
// Create an error group that we can use to run processes in parallel while retaining
// the ability to cancel the entire process immediately should any of it fail.
g, ctx := errgroup.WithContext(context.Background())
// Iterate over all of the paths and generate a cleaned path, if there is an error for any
// of the files, abort the process.
for _, p := range paths {
// Create copy so we can use it within the goroutine correctly.
pi := p
// Recursively call this function to continue digging through the directory tree within
// a separate goroutine. If the context is canceled abort this process.
g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
// If the callback returns true, go ahead and keep walking deeper. This allows
// us to programmatically continue deeper into directories, or stop digging
// if that pathway knows it needs nothing else.
if c, err := fs.SafePath(pi); err != nil {
return err
} else {
push(c)
}
return nil
}
})
}
// Block until all of the routines finish and have returned a value.
return cleaned, g.Wait()
}

View File

@@ -12,11 +12,85 @@ import (
func TestFilesystem_Path(t *testing.T) {
g := Goblin(t)
fs := NewFs()
fs, rfs := NewFs()
g.Describe("Path", func() {
g.It("returns the root path for the instance", func() {
g.Assert(fs.Path()).Equal(fs.rootPath)
g.Assert(fs.Path()).Equal(filepath.Join(rfs.root, "/server"))
})
})
}
func TestFilesystem_SafePath(t *testing.T) {
g := Goblin(t)
fs, rfs := NewFs()
prefix := filepath.Join(rfs.root, "/server")
g.Describe("SafePath", func() {
g.It("returns a cleaned path to a given file", func() {
p, err := fs.SafePath("test.txt")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix + "/test.txt")
p, err = fs.SafePath("/test.txt")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix + "/test.txt")
p, err = fs.SafePath("./test.txt")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix + "/test.txt")
p, err = fs.SafePath("/foo/../test.txt")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix + "/test.txt")
p, err = fs.SafePath("/foo/bar")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix + "/foo/bar")
})
g.It("handles root directory access", func() {
p, err := fs.SafePath("/")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix)
p, err = fs.SafePath("")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix)
})
g.It("removes trailing slashes from paths", func() {
p, err := fs.SafePath("/foo/bar/")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix + "/foo/bar")
})
g.It("handles deeply nested directories that do not exist", func() {
p, err := fs.SafePath("/foo/bar/baz/quaz/../../ducks/testing.txt")
g.Assert(err).IsNil()
g.Assert(p).Equal(prefix + "/foo/bar/ducks/testing.txt")
})
g.It("blocks access to files outside the root directory", func() {
p, err := fs.SafePath("../test.txt")
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
g.Assert(p).Equal("")
p, err = fs.SafePath("/../test.txt")
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
g.Assert(p).Equal("")
p, err = fs.SafePath("./foo/../../test.txt")
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
g.Assert(p).Equal("")
p, err = fs.SafePath("..")
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
g.Assert(p).Equal("")
})
})
}
@@ -27,24 +101,30 @@ func TestFilesystem_Path(t *testing.T) {
// the calls and ensure they all fail with the same reason.
func TestFilesystem_Blocks_Symlinks(t *testing.T) {
g := Goblin(t)
fs := NewFs()
fs, rfs := NewFs()
fs.write("../malicious.txt", []byte("external content"))
if err := os.Mkdir(filepath.Join(fs.rootPath, "../malicious_dir"), 0o777); err != nil {
if err := rfs.CreateServerFileFromString("/../malicious.txt", "external content"); err != nil {
panic(err)
}
links := map[string]string{
"../malicious.txt": "/symlinked.txt",
"../malicious_does_not_exist.txt": "/symlinked_does_not_exist.txt",
"/symlinked_does_not_exist.txt": "/symlinked_does_not_exist2.txt",
"../malicious_dir": "/external_dir",
if err := os.Mkdir(filepath.Join(rfs.root, "/malicious_dir"), 0o777); err != nil {
panic(err)
}
for src, dst := range links {
if err := os.Symlink(filepath.Join(fs.rootPath, src), filepath.Join(fs.rootPath, dst)); err != nil {
panic(err)
}
if err := os.Symlink(filepath.Join(rfs.root, "malicious.txt"), filepath.Join(rfs.root, "/server/symlinked.txt")); err != nil {
panic(err)
}
if err := os.Symlink(filepath.Join(rfs.root, "malicious_does_not_exist.txt"), filepath.Join(rfs.root, "/server/symlinked_does_not_exist.txt")); err != nil {
panic(err)
}
if err := os.Symlink(filepath.Join(rfs.root, "/server/symlinked_does_not_exist.txt"), filepath.Join(rfs.root, "/server/symlinked_does_not_exist2.txt")); err != nil {
panic(err)
}
if err := os.Symlink(filepath.Join(rfs.root, "/malicious_dir"), filepath.Join(rfs.root, "/server/external_dir")); err != nil {
panic(err)
}
g.Describe("Writefile", func() {
@@ -53,7 +133,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Writefile("symlinked.txt", r)
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("cannot write to a non-existent file symlinked outside the root", func() {
@@ -61,7 +141,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Writefile("symlinked_does_not_exist.txt", r)
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("cannot write to chained symlinks with target that does not exist outside the root", func() {
@@ -69,7 +149,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Writefile("symlinked_does_not_exist2.txt", r)
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("cannot write a file to a directory symlinked outside the root", func() {
@@ -77,7 +157,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Writefile("external_dir/foo.txt", r)
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
})
@@ -85,61 +165,41 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
g.It("cannot create a directory outside the root", func() {
err := fs.CreateDirectory("my_dir", "external_dir")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("cannot create a nested directory outside the root", func() {
err := fs.CreateDirectory("my/nested/dir", "../external_dir/foo/bar")
err := fs.CreateDirectory("my/nested/dir", "external_dir/foo/bar")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("cannot create a nested directory outside the root", func() {
err := fs.CreateDirectory("my/nested/dir", "../external_dir/server")
err := fs.CreateDirectory("my/nested/dir", "external_dir/server")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
})
g.Describe("Rename", func() {
// You can rename the symlink file itself, which does not impact the
// underlying symlinked target file outside the server directory.
g.It("can rename a file symlinked outside the directory root", func() {
g.It("cannot rename a file symlinked outside the directory root", func() {
err := fs.Rename("symlinked.txt", "foo.txt")
g.Assert(err).IsNil()
st, err := os.Lstat(filepath.Join(fs.rootPath, "foo.txt"))
g.Assert(err).IsNil()
g.Assert(st.Mode()&os.ModeSymlink != 0).IsTrue()
st, err = os.Lstat(filepath.Join(fs.rootPath, "../malicious.txt"))
g.Assert(err).IsNil()
g.Assert(st.Mode()&os.ModeSymlink == 0).IsTrue()
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
// The same as above, acts on the source directory and not the target directory,
// therefore, this is allowed.
g.It("can rename a directory symlinked outside the root", func() {
g.It("cannot rename a symlinked directory outside the root", func() {
err := fs.Rename("external_dir", "foo")
g.Assert(err).IsNil()
st, err := os.Lstat(filepath.Join(fs.rootPath, "foo"))
g.Assert(err).IsNil()
g.Assert(st.IsDir()).IsTrue()
g.Assert(st.Mode()&os.ModeSymlink != 0).IsTrue()
st, err = os.Lstat(filepath.Join(fs.rootPath, "../external_dir"))
g.Assert(err).IsNil()
g.Assert(st.IsDir()).IsTrue()
g.Assert(st.Mode()&os.ModeSymlink == 0).IsTrue()
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("cannot rename a file to a location outside the directory root", func() {
fs.write("my_file.txt", []byte("internal content"))
rfs.CreateServerFileFromString("my_file.txt", "internal content")
err := fs.Rename("my_file.txt", "../external_dir/my_file.txt")
err := fs.Rename("my_file.txt", "external_dir/my_file.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
})
@@ -147,13 +207,13 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
g.It("cannot chown a file symlinked outside the directory root", func() {
err := fs.Chown("symlinked.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.It("cannot chown a directory symlinked outside the directory root", func() {
err := fs.Chown("external_dir")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
})
@@ -161,7 +221,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
g.It("cannot copy a file symlinked outside the directory root", func() {
err := fs.Copy("symlinked.txt")
g.Assert(err).IsNotNil()
g.Assert(IsPathError(err)).IsTrue()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
})
@@ -170,14 +230,14 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Delete("symlinked.txt")
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "../malicious.txt"))
_, err = os.Stat(filepath.Join(rfs.root, "malicious.txt"))
g.Assert(err).IsNil()
_, err = os.Stat(filepath.Join(fs.rootPath, "symlinked.txt"))
_, err = rfs.StatServerFile("symlinked.txt")
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
})
})
fs.reset()
rfs.reset()
}

View File

@@ -5,7 +5,6 @@ import (
"strconv"
"time"
"emperror.dev/errors"
"github.com/gabriel-vasile/mimetype"
"github.com/goccy/go-json"
)
@@ -37,7 +36,7 @@ func (s *Stat) MarshalJSON() ([]byte, error) {
Size: s.Size(),
Directory: s.IsDir(),
File: !s.IsDir(),
Symlink: s.Mode().Type()&os.ModeSymlink != 0,
Symlink: s.Mode().Perm()&os.ModeSymlink != 0,
Mime: s.Mimetype,
})
}
@@ -45,22 +44,24 @@ func (s *Stat) MarshalJSON() ([]byte, error) {
// Stat stats a file or folder and returns the base stat object from go along
// with the MIME data that can be used for editing files.
func (fs *Filesystem) Stat(p string) (Stat, error) {
p = normalize(p)
s, err := fs.root.Stat(p)
cleaned, err := fs.SafePath(p)
if err != nil {
return Stat{}, errors.Wrap(err, "server/filesystem: stat: failed to stat file")
return Stat{}, err
}
return fs.unsafeStat(cleaned)
}
func (fs *Filesystem) unsafeStat(p string) (Stat, error) {
s, err := os.Stat(p)
if err != nil {
return Stat{}, err
}
var m *mimetype.MIME
if !s.IsDir() {
f, err := fs.root.Open(p)
m, err = mimetype.DetectFile(p)
if err != nil {
return Stat{}, errors.Wrap(err, "server/filesystem: stat: failed to open file")
}
defer f.Close()
m, err = mimetype.DetectReader(f)
if err != nil {
return Stat{}, errors.Wrap(err, "server/filesystem: stat: failed to detect mimetype")
return Stat{}, err
}
}
@@ -74,11 +75,3 @@ func (fs *Filesystem) Stat(p string) (Stat, error) {
return st, nil
}
func (fs *Filesystem) Stat2(p string) (os.FileInfo, error) {
st, err := fs.root.Stat(normalize(p))
if err != nil {
return st, errors.Wrap(err, "server/filesystem: stat2: failed to stat file")
}
return st, nil
}

View File

@@ -0,0 +1,13 @@
package filesystem
import (
"syscall"
"time"
)
// CTime returns the time that the file/folder was created.
func (s *Stat) CTime() time.Time {
st := s.Sys().(*syscall.Stat_t)
return time.Unix(st.Ctimespec.Sec, st.Ctimespec.Nsec)
}

View File

@@ -3,22 +3,12 @@ package filesystem
import (
"syscall"
"time"
"golang.org/x/sys/unix"
)
// CTime returns the time that the file/folder was created.
//
// TODO: remove. Ctim is not actually ever been correct and doesn't actually
// return the creation time.
// Returns the time that the file/folder was created.
func (s *Stat) CTime() time.Time {
if st, ok := s.Sys().(*unix.Stat_t); ok {
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
}
if st, ok := s.Sys().(*syscall.Stat_t); ok {
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
}
return time.Time{}
st := s.Sys().(*syscall.Stat_t)
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
}

View File

@@ -0,0 +1,12 @@
package filesystem
import (
"time"
)
// On linux systems this will return the time that the file was created.
// However, I have no idea how to do this on windows, so we're skipping it
// for right now.
func (s *Stat) CTime() time.Time {
return s.ModTime()
}

View File

@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"context"
"fmt"
"html/template"
"io"
"os"
@@ -14,8 +15,8 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
@@ -162,7 +163,7 @@ func (s *Server) SetRestoring(state bool) {
// RemoveContainer removes the installation container for the server.
func (ip *InstallationProcess) RemoveContainer() error {
err := ip.client.ContainerRemove(ip.Server.Context(), ip.Server.ID()+"_installer", container.RemoveOptions{
err := ip.client.ContainerRemove(ip.Server.Context(), ip.Server.ID()+"_installer", types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
@@ -260,7 +261,7 @@ func (ip *InstallationProcess) pullInstallationImage() error {
}
// Get the ImagePullOptions.
imagePullOptions := image.PullOptions{All: false}
imagePullOptions := types.ImagePullOptions{All: false}
if registryAuth != nil {
b64, err := registryAuth.Base64()
if err != nil {
@@ -273,7 +274,7 @@ func (ip *InstallationProcess) pullInstallationImage() error {
r, err := ip.client.ImagePull(ip.Server.Context(), ip.Script.ContainerImage, imagePullOptions)
if err != nil {
images, ierr := ip.client.ImageList(ip.Server.Context(), image.ListOptions{})
images, ierr := ip.client.ImageList(ip.Server.Context(), types.ImageListOptions{})
if ierr != nil {
// Well damn, something has gone really wrong here, just go ahead and abort there
// isn't much anything we can do to try and self-recover from this.
@@ -345,7 +346,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
defer ip.RemoveContainer()
ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server")
reader, err := ip.client.ContainerLogs(ip.Server.Context(), containerId, container.LogsOptions{
reader, err := ip.client.ContainerLogs(ip.Server.Context(), containerId, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: false,
@@ -426,6 +427,10 @@ func (ip *InstallationProcess) Execute() (string, error) {
}
cfg := config.Get()
if cfg.System.User.Rootless.Enabled {
conf.User = fmt.Sprintf("%d:%d", cfg.System.User.Rootless.ContainerUID, cfg.System.User.Rootless.ContainerGID)
}
tmpfsSize := strconv.Itoa(int(cfg.Docker.TmpfsSize))
hostConf := &container.HostConfig{
Mounts: []mount.Mount{
@@ -448,6 +453,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
},
DNS: cfg.Docker.Network.Dns,
LogConfig: cfg.Docker.ContainerLogConfig(),
Privileged: true,
NetworkMode: container.NetworkMode(cfg.Docker.Network.Mode),
UsernsMode: container.UsernsMode(cfg.Docker.UsernsMode),
}
@@ -456,9 +462,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
// to trigger the reinstall of the server. It is possible the directory would
// not exist when this runs if Wings boots with a missing directory and a user
// triggers a reinstall before trying to start the server.
// todo: verify, this might be wrong now that the FS requires the directory to
// exist to boot it.
if _, err := ip.Server.EnsureDataDirectoryExists(); err != nil {
if err := ip.Server.EnsureDataDirectoryExists(); err != nil {
return "", err
}
@@ -478,7 +482,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
}
ip.Server.Log().WithField("container_id", r.ID).Info("running installation script for server in container")
if err := ip.client.ContainerStart(ctx, r.ID, container.StartOptions{}); err != nil {
if err := ip.client.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}); err != nil {
return "", err
}
@@ -513,7 +517,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
// the server configuration directory, as well as to a websocket listener so
// that the process can be viewed in the panel by administrators.
func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
opts := container.LogsOptions{ShowStdout: true, ShowStderr: true, Follow: true}
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: true}
reader, err := ip.client.ContainerLogs(ctx, id, opts)
if err != nil {
return err

View File

@@ -2,10 +2,10 @@ package server
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"sync"
"time"
@@ -13,6 +13,7 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gammazero/workerpool"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
@@ -195,17 +196,7 @@ func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server,
return nil, errors.WithStackIf(err)
}
d, err := s.EnsureDataDirectoryExists()
if err != nil {
return nil, err
}
fs, err := filesystem.New(d, s.DiskSpace(), s.Config().Egg.FileDenylist)
if err != nil {
return nil, err
}
s.fs = fs
s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
// Right now we only support a Docker based environment, so I'm going to hard code
// this logic in. When we're ready to support other environment we'll need to make

View File

@@ -29,34 +29,6 @@ func (s *Server) Mounts() []environment.Mount {
},
}
cfg := config.Get()
// Handle mounting a generated `/etc/passwd` if the feature is enabled.
if cfg.System.Passwd.Enable {
s.Log().WithFields(log.Fields{"source_path": cfg.System.Passwd.Directory}).Info("mouting generated /etc/{group,passwd} to workaround UID/GID issues")
m = append(m, environment.Mount{
Source: filepath.Join(cfg.System.Passwd.Directory, "group"),
Target: "/etc/group",
ReadOnly: true,
})
m = append(m, environment.Mount{
Source: filepath.Join(cfg.System.Passwd.Directory, "passwd"),
Target: "/etc/passwd",
ReadOnly: true,
})
}
if cfg.System.MachineID.Enable {
// Hytale wants a machine-id in order to encrypt tokens for the server.
// So add a mount to `/etc/machine-id` to a source that contains the
// server's UUID without any dashes.
m = append(m, environment.Mount{
Source: filepath.Join(cfg.System.MachineID.Directory, s.ID()),
Target: "/etc/machine-id",
ReadOnly: true,
})
}
// Also include any of this server's custom mounts when returning them.
return append(m, s.customMounts()...)
}
@@ -84,12 +56,14 @@ func (s *Server) customMounts() []environment.Mount {
if !strings.HasPrefix(source, filepath.Clean(allowed)) {
continue
}
mounted = true
mounts = append(mounts, environment.Mount{
Source: source,
Target: target,
ReadOnly: m.ReadOnly,
})
break
}

View File

@@ -3,6 +3,7 @@ package server
import (
"context"
"fmt"
"os"
"time"
"emperror.dev/errors"
@@ -160,7 +161,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
return s.Environment.Start(s.Context())
case PowerActionTerminate:
return s.Environment.Terminate(s.Context(), "SIGKILL")
return s.Environment.Terminate(s.Context(), os.Kill)
}
return errors.New("attempting to handle unknown power action")

View File

@@ -1,19 +1,17 @@
package server
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/creasty/defaults"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
@@ -71,7 +69,6 @@ type Server struct {
// Tracks open websocket connections for the server.
wsBag *WebsocketBag
wsBagLocker sync.Mutex
sftpBag *system.ContextBag
sinks map[system.SinkName]*system.SinkPool
@@ -202,16 +199,6 @@ func (s *Server) Sync() error {
s.SyncWithEnvironment()
// If the server is suspended immediately disconnect all open websocket connections
// and any connected SFTP clients. We don't need to worry about revoking any JWTs
// here since they'll be blocked from re-connecting to the websocket anyways. This
// just forces the client to disconnect and attempt to reconnect (rather than waiting
// on them to send a message and hit that disconnect logic).
if s.IsSuspended() {
s.Websockets().CancelAll()
s.Sftp().CancelAll()
}
return nil
}
@@ -257,22 +244,10 @@ func (s *Server) ReadLogfile(len int) ([]string, error) {
// for the server is setup, and that all of the necessary files are created.
func (s *Server) CreateEnvironment() error {
// Ensure the data directory exists before getting too far through this process.
if _, err := s.EnsureDataDirectoryExists(); err != nil {
if err := s.EnsureDataDirectoryExists(); err != nil {
return err
}
cfg := *config.Get()
if cfg.System.MachineID.Enable {
// Hytale wants a machine-id in order to encrypt tokens for the server. So
// write a machine-id file for the server that contains the server's UUID
// without any dashes.
p := filepath.Join(cfg.System.MachineID.Directory, s.ID())
machineID := append(bytes.ReplaceAll([]byte(s.ID()), []byte{'-'}, []byte{}), '\n')
if err := os.WriteFile(p, machineID, 0o644); err != nil {
return errors.Wrap(err, "server: failed to write machine-id to disk")
}
}
return s.Environment.Create()
}
@@ -295,25 +270,21 @@ func (s *Server) Filesystem() *filesystem.Filesystem {
// EnsureDataDirectoryExists ensures that the data directory for the server
// instance exists.
func (s *Server) EnsureDataDirectoryExists() (string, error) {
c := *config.Get()
path := filepath.Join(c.System.Data, s.ID())
if _, err := os.Lstat(path); err != nil {
if !errors.Is(err, os.ErrNotExist) {
return path, errors.Wrap(err, "server: failed to stat server root directory")
}
if err := os.MkdirAll(path, 0o700); err != nil {
return path, errors.Wrap(err, "server: failed to create root directory")
}
if err := os.Chown(path, c.System.User.Uid, c.System.User.Gid); err != nil {
return path, errors.Wrap(err, "server: failed to chown newly created root directory")
func (s *Server) EnsureDataDirectoryExists() error {
if _, err := os.Lstat(s.fs.Path()); err != nil {
if os.IsNotExist(err) {
s.Log().Debug("server: creating root directory and setting permissions")
if err := os.MkdirAll(s.fs.Path(), 0o700); err != nil {
return errors.WithStack(err)
}
if err := s.fs.Chown("/"); err != nil {
s.Log().WithField("error", err).Warn("server: failed to chown server data directory")
}
} else {
return errors.WrapIf(err, "server: failed to stat server root directory")
}
}
return path, nil
return nil
}
// OnStateChange sets the state of the server internally. This function handles crash detection as

View File

@@ -4,16 +4,14 @@ import (
"context"
"fmt"
"io"
"os"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/progress"
"github.com/pterodactyl/wings/server/filesystem"
)
// Archive returns an archive that can be used to stream the contents of the
// contents of a server.
func (t *Transfer) Archive(r *os.Root) (*Archive, error) {
func (t *Transfer) Archive() (*Archive, error) {
if t.archive == nil {
// Get the disk usage of the server (used to calculate the progress of the archive process)
rawSize, err := t.Server.Filesystem().DiskUsage(true)
@@ -21,12 +19,8 @@ func (t *Transfer) Archive(r *os.Root) (*Archive, error) {
return nil, fmt.Errorf("transfer: failed to get server disk usage: %w", err)
}
a, err := filesystem.NewArchive(r, "/", filesystem.WithProgress(progress.NewProgress(uint64(rawSize))))
if err != nil {
_ = r.Close()
return nil, errors.WrapIf(err, "server/transfer: failed to create archive")
}
t.archive = &Archive{archive: a}
// Create a new archive instance and assign it to the transfer.
t.archive = NewArchive(t, uint64(rawSize))
}
return t.archive, nil
@@ -37,6 +31,16 @@ type Archive struct {
archive *filesystem.Archive
}
// NewArchive returns a new archive associated with the given transfer.
func NewArchive(t *Transfer, size uint64) *Archive {
return &Archive{
archive: &filesystem.Archive{
BasePath: t.Server.Filesystem().Path(),
Progress: progress.NewProgress(size),
},
}
}
// Stream returns a reader that can be used to stream the contents of the archive.
func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
return a.archive.Stream(ctx, w)
@@ -44,5 +48,5 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
// Progress returns the current progress of the archive.
func (a *Archive) Progress() *progress.Progress {
return a.archive.Progress()
return a.archive.Progress
}

View File

@@ -4,14 +4,13 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"time"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/progress"
)
@@ -24,13 +23,7 @@ func (t *Transfer) PushArchiveToTarget(url, token string) ([]byte, error) {
t.SendMessage("Preparing to stream server data to destination...")
t.SetStatus(StatusProcessing)
r, err := os.OpenRoot(t.Server.Filesystem().Path())
if err != nil {
return nil, errors.Wrap(err, "server/transfer: failed to open root directory")
}
defer r.Close()
a, err := t.Archive(r)
a, err := t.Archive()
if err != nil {
t.Error(err, "Failed to get archive for transfer.")
return nil, errors.New("failed to get archive for transfer")

View File

@@ -28,7 +28,6 @@ func (s *Server) SyncWithEnvironment() {
Mounts: s.Mounts(),
Allocations: cfg.Allocations,
Limits: cfg.Build,
Labels: cfg.Labels,
})
// For Docker specific environments we also want to update the configured image

View File

@@ -25,13 +25,6 @@ func (s *Server) Websockets() *WebsocketBag {
return s.wsBag
}
func (w *WebsocketBag) Len() int {
w.mu.Lock()
defer w.mu.Unlock()
return len(w.conns)
}
// Push adds a new websocket connection to the end of the stack.
func (w *WebsocketBag) Push(u uuid.UUID, cancel *context.CancelFunc) {
w.mu.Lock()

View File

@@ -2,6 +2,7 @@ package sftp
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -121,7 +122,7 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
if !h.can(permission) {
return nil, sftp.ErrSSHFxPermissionDenied
}
f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
if err != nil {
l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system")
return nil, sftp.ErrSSHFxFailure
@@ -213,12 +214,22 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
}
h.events.MustLog(server.ActivitySftpCreateDirectory, FileAction{Entity: request.Filepath})
break
// Support creating symlinks between files. The source and target must resolve within
// the server home directory.
case "Symlink":
if !h.can(PermissionFileCreate) {
return sftp.ErrSSHFxPermissionDenied
}
if err := h.fs.Symlink(request.Filepath, request.Target); err != nil {
l.WithField("target", request.Target).WithField("error", err).Error("failed to create symlink")
source, err := h.fs.SafePath(request.Filepath)
if err != nil {
return sftp.ErrSSHFxNoSuchFile
}
target, err := h.fs.SafePath(request.Target)
if err != nil {
return sftp.ErrSSHFxNoSuchFile
}
if err := os.Symlink(source, target); err != nil {
l.WithField("target", target).WithField("error", err).Error("failed to create symlink")
return sftp.ErrSSHFxFailure
}
break
@@ -263,23 +274,18 @@ func (h *Handler) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
switch request.Method {
case "List":
d, err := h.fs.ReadDir(request.Filepath)
p, err := h.fs.SafePath(request.Filepath)
if err != nil {
return nil, sftp.ErrSSHFxNoSuchFile
}
files, err := ioutil.ReadDir(p)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, sftp.ErrSSHFxNoSuchFile
}
h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error while listing directory")
return nil, sftp.ErrSSHFxFailure
}
files := make([]os.FileInfo, len(d))
for _, entry := range d {
if i, err := entry.Info(); err == nil {
files = append(files, i)
}
}
return ListerAt(files), nil
case "Stat":
st, err := h.fs.Stat2(request.Filepath)
st, err := h.fs.Stat(request.Filepath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, sftp.ErrSSHFxNoSuchFile
@@ -287,7 +293,7 @@ func (h *Handler) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error performing stat on file")
return nil, sftp.ErrSSHFxFailure
}
return ListerAt([]os.FileInfo{st}), nil
return ListerAt([]os.FileInfo{st.FileInfo}), nil
default:
return nil, sftp.ErrSSHFxOpUnsupported
}
@@ -308,7 +314,3 @@ func (h *Handler) can(permission string) bool {
}
return false
}
func (h *Handler) User() string {
return h.events.user
}

View File

@@ -107,7 +107,7 @@ func (c *SFTPServer) Run() error {
go func(conn net.Conn) {
defer conn.Close()
if err := c.AcceptInbound(conn, conf); err != nil {
log.WithField("error", err).WithField("ip", conn.RemoteAddr().String()).Error("sftp: failed to accept inbound connection")
log.WithField("error", err).Error("sftp: failed to accept inbound connection")
}
}(conn)
}
@@ -129,7 +129,7 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) erro
// If its not a session channel we just move on because its not something we
// know how to handle at this point.
if ch.ChannelType() != "session" {
_ = ch.Reject(ssh.UnknownChannelType, "unknown channel type")
ch.Reject(ssh.UnknownChannelType, "unknown channel type")
continue
}
@@ -143,41 +143,37 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) erro
// Channels have a type that is dependent on the protocol. For SFTP
// this is "subsystem" with a payload that (should) be "sftp". Discard
// anything else we receive ("pty", "shell", etc)
_ = req.Reply(req.Type == "subsystem" && string(req.Payload[4:]) == "sftp", nil)
req.Reply(req.Type == "subsystem" && string(req.Payload[4:]) == "sftp", nil)
}
}(requests)
if srv, ok := c.manager.Get(sconn.Permissions.Extensions["uuid"]); ok {
if err := c.Handle(sconn, srv, channel); err != nil {
return err
// If no UUID has been set on this inbound request then we can assume we
// have screwed up something in the authentication code. This is a sanity
// check, but should never be encountered (ideally...).
//
// This will also attempt to match a specific server out of the global server
// store and return nil if there is no match.
uuid := sconn.Permissions.Extensions["uuid"]
srv := c.manager.Find(func(s *server.Server) bool {
if uuid == "" {
return false
}
return s.ID() == uuid
})
if srv == nil {
continue
}
}
return nil
}
// Handle spins up a SFTP server instance for the authenticated user's server allowing
// them access to the underlying filesystem.
func (c *SFTPServer) Handle(conn *ssh.ServerConn, srv *server.Server, channel ssh.Channel) error {
handler, err := NewHandler(conn, srv)
if err != nil {
return errors.WithStackIf(err)
}
ctx := srv.Sftp().Context(handler.User())
rs := sftp.NewRequestServer(channel, handler.Handlers())
go func() {
select {
case <-ctx.Done():
srv.Log().WithField("user", conn.User()).Warn("sftp: terminating active session")
// Spin up a SFTP server instance for the authenticated user's server allowing
// them access to the underlying filesystem.
handler, err := NewHandler(sconn, srv)
if err != nil {
return errors.WithStackIf(err)
}
rs := sftp.NewRequestServer(channel, handler.Handlers())
if err := rs.Serve(); err == io.EOF {
_ = rs.Close()
}
}()
if err := rs.Serve(); err == io.EOF {
_ = rs.Close()
}
return nil

View File

@@ -1,3 +1,3 @@
package system
var Version = "develop"
var Version = "1.11.4"

View File

@@ -1,58 +0,0 @@
package system
import (
"context"
"sync"
)
type ctxHolder struct {
ctx context.Context
cancel context.CancelFunc
}
type ContextBag struct {
mu sync.Mutex
ctx context.Context
items map[string]ctxHolder
}
func NewContextBag(ctx context.Context) *ContextBag {
return &ContextBag{ctx: ctx, items: make(map[string]ctxHolder)}
}
// Context returns a context for the given key. If a value already exists in the
// internal map it is returned, otherwise a new cancelable context is returned.
// This context is shared between all callers until the cancel function is called
// by calling Cancel or CancelAll.
func (cb *ContextBag) Context(key string) context.Context {
cb.mu.Lock()
defer cb.mu.Unlock()
if _, ok := cb.items[key]; !ok {
ctx, cancel := context.WithCancel(cb.ctx)
cb.items[key] = ctxHolder{ctx, cancel}
}
return cb.items[key].ctx
}
func (cb *ContextBag) Cancel(key string) {
cb.mu.Lock()
defer cb.mu.Unlock()
if v, ok := cb.items[key]; ok {
v.cancel()
delete(cb.items, key)
}
}
func (cb *ContextBag) CancelAll() {
cb.mu.Lock()
defer cb.mu.Unlock()
for _, v := range cb.items {
v.cancel()
}
cb.items = make(map[string]ctxHolder)
}

View File

@@ -31,8 +31,8 @@ func NewSinkPool() *SinkPool {
// On adds a channel to the sink pool instance.
func (p *SinkPool) On(c chan []byte) {
p.mu.Lock()
defer p.mu.Unlock()
p.sinks = append(p.sinks, c)
p.mu.Unlock()
}
// Off removes a given channel from the sink pool. If no matching sink is found
@@ -69,11 +69,13 @@ func (p *SinkPool) Off(c chan []byte) {
func (p *SinkPool) Destroy() {
p.mu.Lock()
defer p.mu.Unlock()
for _, c := range p.sinks {
if c != nil {
close(c)
}
}
p.sinks = nil
}
@@ -96,7 +98,6 @@ func (p *SinkPool) Destroy() {
func (p *SinkPool) Push(data []byte) {
p.mu.RLock()
defer p.mu.RUnlock()
var wg sync.WaitGroup
wg.Add(len(p.sinks))
for _, c := range p.sinks {
@@ -104,22 +105,15 @@ func (p *SinkPool) Push(data []byte) {
defer wg.Done()
select {
case c <- data:
case <-time.After(10 * time.Millisecond):
// If we cannot send the message to the channel within 10ms,
// then try to drop the oldest message from the channel, then
// send our message.
select {
case <-c:
// Only attempt to send the message if we were able to make
// space for it on the channel.
select {
case c <- data:
default:
}
default:
// Do nothing, this is a fallthrough if there is nothing to
// read from c.
case <-time.After(time.Millisecond * 10):
// If there is nothing in the channel to read, but we also cannot write
// to the channel, just skip over sending data. If we don't do this you'll
// end up blocking the application on the channel read below.
if len(c) == 0 {
break
}
<-c
c <- data
}
}(c)
}

View File

@@ -2,6 +2,7 @@ package system
import (
"fmt"
"reflect"
"sync"
"testing"
"time"
@@ -10,11 +11,11 @@ import (
)
func MutexLocked(m *sync.RWMutex) bool {
unlocked := m.TryLock()
if unlocked {
m.Unlock()
}
return !unlocked
v := reflect.ValueOf(m).Elem()
state := v.FieldByName("w").FieldByName("state")
return state.Int()&1 == 1 || v.FieldByName("readerCount").Int() > 0
}
func TestSink(t *testing.T) {

View File

@@ -6,7 +6,6 @@ import (
"github.com/acobaugh/osrelease"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/system"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/parsers/kernel"
)
@@ -122,22 +121,22 @@ func GetSystemInformation() (*Information, error) {
}, nil
}
func GetDockerInfo(ctx context.Context) (types.Version, system.Info, error) {
func GetDockerInfo(ctx context.Context) (types.Version, types.Info, error) {
// TODO: find a way to re-use the client from the docker environment.
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return types.Version{}, system.Info{}, err
return types.Version{}, types.Info{}, err
}
defer c.Close()
dockerVersion, err := c.ServerVersion(ctx)
if err != nil {
return types.Version{}, system.Info{}, err
return types.Version{}, types.Info{}, err
}
dockerInfo, err := c.Info(ctx)
if err != nil {
return types.Version{}, system.Info{}, err
return types.Version{}, types.Info{}, err
}
return dockerVersion, dockerInfo, nil

View File

@@ -3,13 +3,13 @@ package system
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"strconv"
"sync"
"emperror.dev/errors"
"github.com/goccy/go-json"
)
var (

View File

@@ -1,7 +1,6 @@
package system
import (
"encoding/json"
"math/rand"
"strings"
"sync"
@@ -9,6 +8,7 @@ import (
"time"
. "github.com/franela/goblin"
"github.com/goccy/go-json"
)
func Test_Utils(t *testing.T) {

BIN
wings-api.paw Normal file

Binary file not shown.