diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..04592d938fb03f0391d1f3296c6fb6e30f9cd924 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,21 @@ +# Compiled binaries and generated documentation +/target/ + +# Profiling data +/perf.data +/perf.data.old +/flamegraph.svg + +# Logs generated by the web server +/access.log +/error.log + +# OS-specific files +.DS_Store +Thumbs.db +.Spotlight-V100 +.Trashes + +# Temporary files used by the editor +*.swp +*.swo \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..b635e3744d7e9570e31d39dc987a6f4af77257f0 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +ferron/assets/icon.ico filter=lfs diff=lfs merge=lfs -text +ferron-passwd/assets/icon.ico filter=lfs diff=lfs merge=lfs -text diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000000000000000000000000000000..5251d0f38aa4263fe2dd72c5947d5a9a19bb7464 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "cargo" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000000000000000000000000000000000..16af500072e91bafbf0345560c41df2cccc9ab61 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,237 @@ +name: Release + +on: + push: + branches: [ "main" ] + +jobs: + release: + # Set the job to run on the platform specified by the matrix below + runs-on: ${{ matrix.runner }} + + # Define the build matrix for cross-compilation + strategy: + matrix: + include: + - name: x86_64-unknown-linux-gnu + runner: ubuntu-latest + target: x86_64-unknown-linux-gnu + command: cross + - name: x86_64-unknown-linux-musl + runner: ubuntu-latest + target: x86_64-unknown-linux-musl + command: cross + - name: i686-unknown-linux-gnu + runner: ubuntu-latest + target: i686-unknown-linux-gnu + command: cross + - name: aarch64-unknown-linux-gnu + runner: ubuntu-latest + target: aarch64-unknown-linux-gnu + command: cross + - name: armv7-unknown-linux-gnueabihf + runner: ubuntu-latest + target: armv7-unknown-linux-gnueabihf + command: cross + - name: riscv64gc-unknown-linux-gnu + runner: ubuntu-latest + target: riscv64gc-unknown-linux-gnu + command: cross + - name: x86_64-unknown-freebsd + runner: ubuntu-latest + target: x86_64-unknown-freebsd + command: cross + - name: x86_64-pc-windows-msvc + runner: windows-latest + target: x86_64-pc-windows-msvc + command: cargo + - name: i686-pc-windows-msvc + runner: windows-latest + target: i686-pc-windows-msvc + command: cargo + - name: aarch64-pc-windows-msvc + runner: windows-latest + target: aarch64-pc-windows-msvc + command: cargo + - 
name: x86_64-apple-darwin + runner: macos-latest + target: x86_64-apple-darwin + command: cargo + - name: aarch64-apple-darwin + runner: macos-latest + target: aarch64-apple-darwin + command: cargo + + # The steps to run for each matrix item + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + ref: main + fetch-depth: 0 + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + targets: "${{ matrix.target }}" + + - name: Setup cache + uses: Swatinem/rust-cache@v2 + + - name: Determine Ferron version + shell: bash + run: | + FERRON_VERSION_CARGO="$(cat ferron/Cargo.toml | grep -E '^version' | sed -E 's|.*"([0-9a-zA-Z.+-]+)"$|\1|g')" + FERRON_VERSION_GIT="$(git tag --sort=-committerdate | head -n 1 | sed s/[^0-9a-zA-Z.+-]//g)" + if [ "$FERRON_VERSION_CARGO" != "" ]; then + echo "Version determined from Cargo.toml file" + echo "FERRON_VERSION=$FERRON_VERSION_CARGO" >> $GITHUB_ENV + elif [ "$FERRON_VERSION_GIT" != "" ]; then + echo "Version determined from the Git tag" + echo "FERRON_VERSION=$FERRON_VERSION_GIT" >> $GITHUB_ENV + else + echo "Can't determine the server version!" 2>&1 + exit 1 + fi + + - name: Install Cross + if: matrix.command == 'cross' + shell: bash + run: | + curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + cargo binstall --no-confirm cross + + - name: Build binaries + run: "${{ matrix.command }} build --verbose --locked --release --target ${{ matrix.target }}" + + - name: Prepare for packaging + shell: bash + run: | + mkdir release + find target/${{ matrix.target }}/release -mindepth 1 -maxdepth 1 -type f ! 
-name "*.*" -o -name "*.exe" -o -name "*.dll" -o -name "*.dylib" -o -name "*.so" | sed -E "s|(.*)|cp -a \1 release|" | bash + cp -a ferron-release.yaml release/ferron.yaml + cp -a wwwroot release + + - name: Create a release ZIP archive + uses: thedoctor0/zip-release@0.7.5 + with: + type: "zip" + filename: "../ferron.zip" + directory: "release" + + - name: Set up SSH + uses: LuisEnMarroquin/setup-ssh-action@v2.0.5 + with: + ORIGIN: ${{ secrets.SSH_HOSTNAME }} + SSHKEY: ${{ secrets.SSH_KEY }} + NAME: ferron-servers + PORT: ${{ secrets.SSH_PORT }} + USER: ${{ secrets.SSH_USERNAME }} + + - name: Release Ferron on Ferron's servers + shell: bash + run: | + ssh ferron-servers "mkdir -p ferron/${{ env.FERRON_VERSION }} || true" + scp ferron.zip ferron-servers:ferron/${{ env.FERRON_VERSION }}/ferron-${{ env.FERRON_VERSION }}-${{ matrix.target }}.zip + + # The "move-ferron-archive" is a custom command that moves the ZIP archive to be served by the download server + ssh ferron-servers "sudo move-ferron-archive ${{ env.FERRON_VERSION }} ${{ matrix.target }}" + + docs: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + ref: main + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + + - name: Setup cache + uses: Swatinem/rust-cache@v2 + + - name: Generate the Rust crate documentation + run: "cargo doc --verbose --locked --release" + + - name: Create the documentation ZIP archive + uses: thedoctor0/zip-release@0.7.5 + with: + type: "zip" + filename: "../../ferron-rustdocs.zip" + directory: "target/doc" + + - name: Set up SSH + uses: LuisEnMarroquin/setup-ssh-action@v2.0.5 + with: + ORIGIN: ${{ secrets.SSH_HOSTNAME }} + SSHKEY: ${{ secrets.SSH_KEY }} + NAME: ferron-servers + PORT: ${{ secrets.SSH_PORT }} + USER: ${{ secrets.SSH_USERNAME }} + + - name: Deploy the documentation + shell: bash + run: | + scp ferron-rustdocs.zip ferron-servers:. 
+ + # The "deploy-ferron-rustdocs" is a custom command that deploys the Ferron's Rust crate documentation + ssh ferron-servers "sudo deploy-ferron-rustdocs ferron-rustdocs.zip && rm ferron-rustdocs.zip" + + docker: + runs-on: ubuntu-latest + + permissions: + packages: write + contents: read + attestations: write + id-token: write + + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + ref: main + fetch-depth: 0 + + - name: Log in to Docker Hub + uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Determine Ferron version + shell: bash + run: | + FERRON_VERSION_CARGO="$(cat ferron/Cargo.toml | grep -E '^version' | sed -E 's|.*"([0-9a-zA-Z.+-]+)"$|\1|g')" + FERRON_VERSION_GIT="$(git tag --sort=-committerdate | head -n 1 | sed s/[^0-9a-zA-Z.+-]//g)" + if [ "$FERRON_VERSION_CARGO" != "" ]; then + echo "Version determined from Cargo.toml file" + echo "FERRON_VERSION=$FERRON_VERSION_CARGO" >> $GITHUB_ENV + elif [ "$FERRON_VERSION_GIT" != "" ]; then + echo "Version determined from the Git tag" + echo "FERRON_VERSION=$FERRON_VERSION_GIT" >> $GITHUB_ENV + else + echo "Can't determine the server version!" 2>&1 + exit 1 + fi + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push Docker image + id: push + uses: docker/build-push-action@3b5e8027fcad23fda98b2e3ac259d8d67585f671 + with: + context: . 
+ file: ./Dockerfile + push: true + tags: "ferronserver/ferron:${{ env.FERRON_VERSION }},ferronserver/ferron:latest" + + - name: Generate artifact attestation + uses: actions/attest-build-provenance@v2 + with: + subject-name: index.docker.io/ferronserver/ferron + subject-digest: ${{ steps.push.outputs.digest }} + push-to-registry: true \ No newline at end of file diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml new file mode 100644 index 0000000000000000000000000000000000000000..213fe0895051abbb0d1ba0e779c5d9ed391cffc0 --- /dev/null +++ b/.github/workflows/rust.yml @@ -0,0 +1,37 @@ +name: Rust + +on: + push: + branches: [ "develop" ] + pull_request: + branches: [ "develop" ] + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - uses: Swatinem/rust-cache@v2 + - name: Build + run: cargo build --verbose + - name: Run tests + run: cargo test --workspace --verbose + + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + components: clippy, rustfmt + - uses: Swatinem/rust-cache@v2 + - name: rustfmt + run: cargo fmt --all -- --check + - name: clippy + run: cargo clippy --workspace -- -D warnings + diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..04592d938fb03f0391d1f3296c6fb6e30f9cd924 --- /dev/null +++ b/.gitignore @@ -0,0 +1,21 @@ +# Compiled binaries and generated documentation +/target/ + +# Profiling data +/perf.data +/perf.data.old +/flamegraph.svg + +# Logs generated by the web server +/access.log +/error.log + +# OS-specific files +.DS_Store +Thumbs.db +.Spotlight-V100 +.Trashes + +# Temporary files used by the editor +*.swp +*.swo \ No newline at end of file diff --git a/.rusty-hook.toml b/.rusty-hook.toml new file mode 100644 index 0000000000000000000000000000000000000000..c43e52ef919c8e8f32632227605ebb3806db507b --- /dev/null +++ b/.rusty-hook.toml 
@@ -0,0 +1,5 @@ +[hooks] +pre-commit = "cargo clippy --fix --all-targets --allow-staged && cargo fmt && git add -A" + +[logging] +verbose = true diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..15d347d99b0bdfe88cddfc94c1546de862a29677 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Contributor Code of Conduct + +This project adheres to No Code of Conduct. We are all adults. We accept anyone's contributions. Nothing else matters. + +For more information please visit the [No Code of Conduct](https://nocodeofconduct.com) homepage. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..6c32fd7afb9d9841ccaeda2adb2c62260860d05f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,3 @@ +# Contribution guidelines + +_See [contribution page on Ferron's website](https://www.ferronweb.org/contribute)_ \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..a6fbca7a2a0c17c692cfec72e2dfd0cd3365f315 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,4038 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +dependencies = [ + "anstyle", + "once_cell", + "windows-sys 0.59.0", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "argon2" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072" +dependencies = [ + "base64ct", + "blake2", + "cpufeatures", + "password-hash", +] + +[[package]] +name = "arraydeque" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d902e3d592a523def97af8f317b08ce16b7ab854c1985a0c671e6f15cebc236" + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = 
"asn1-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-compression" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" +dependencies = [ + "brotli", + "flate2", + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", +] + +[[package]] +name = "async-http-codec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "096146020b08dbc4587685b0730a7ba905625af13c65f8028035cdfd69573c91" +dependencies = [ + "anyhow", + "futures", + "http", + "httparse", + "log", +] + +[[package]] +name = "async-io" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +dependencies = [ + "async-lock", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix", + "slab", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + +[[package]] +name = "async-trait" +version = "0.1.88" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "async-web-client" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"37381fb4fad3cd9b579628c21a58f528ef029d1f072d10f16cb9431aa2236d29" +dependencies = [ + "async-http-codec", + "async-net", + "futures", + "futures-rustls", + "http", + "lazy_static", + "log", + "rustls-pki-types", + "thiserror 1.0.69", + "webpki-roots", +] + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "backtrace" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" + +[[package]] +name = "bit-set" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" + +[[package]] +name = "bitflags" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" + +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + +[[package]] +name = "bitvec-nom2" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d988fcc40055ceaa85edc55875a08f8abd29018582647fd82ad6128dba14a5f0" +dependencies = [ + "bitvec", + "nom", +] + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blocking" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +dependencies = [ + "async-channel", + "async-task", + "futures-io", + "futures-lite", + "piper", +] + +[[package]] +name = "brotli" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf19e729cdbd51af9a397fb9ef8ac8378007b797f8273cfbfdf45dcaa316167b" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + +[[package]] +name = "bumpalo" +version = "3.17.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cache_control" +version = "0.2.0" +source = "git+https://github.com/DorianNiemiecSVRJS/rust-cache-control.git#2229d7b60c30eb1291654e2a8935c3c0ae9948e7" + +[[package]] +name = "cc" +version = "1.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chrono" +version = "0.4.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "ci_info" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24f638c70e8c5753795cc9a8c07c44da91554a09e4cf11a7326e8161b0a3c45e" +dependencies = [ + "envmnt", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + +[[package]] +name = "clap" +version = "4.5.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eccb054f56cbd38340b380d4a8e69ef1f02f1af43db2f0cc817a4774d80ae071" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efd9466fac8543255d3b1fcad4762c5e116ffe808c8a3043d4263cd4fd4862a2" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "cobs" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "const_panic" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2459fc9262a1aa204eb4b5764ad4f189caec88aea9634389c0a25f8be7f6265e" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ 
+ "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = 
"doctest-file" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "embedded-io" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1a6892d9eef45c8fa6b9e0086428a2cca8491aca8f787c534a3d6d0bcb3ced" + +[[package]] +name = "embedded-io" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" + +[[package]] +name = "encoding_rs" +version = "0.8.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "envmnt" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2d328fc287c61314c4a61af7cfdcbd7e678e39778488c7cb13ec133ce0f4059" +dependencies = [ + "fsio", + "indexmap 1.9.3", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "event-listener" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + 
+[[package]] +name = "event-listener-strategy" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +dependencies = [ + "event-listener", + "pin-project-lite", +] + +[[package]] +name = "fancy-regex" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e24cb5a94bcae1e5408b0effca5cd7172ea3c5755049c5f3af4cd283a165298" +dependencies = [ + "bit-set", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "ferron" +version = "1.0.0" +dependencies = [ + "anyhow", + "async-channel", + "async-compression", + "async-trait", + "base64", + "bytes", + "cache_control", + "chrono", + "clap", + "fancy-regex", + "futures-lite", + "futures-util", + "glob", + "h3", + "h3-quinn", + "hashlink", + "http", + "http-body-util", + "httparse", + "hyper", + "hyper-tungstenite", + "hyper-util", + "interprocess", + "itertools 0.14.0", + "memmem", + "mimalloc", + "new_mime_guess", + "nix", + "ocsp-stapler", + "password-auth", + "pin-project-lite", + "postcard", + "pyo3", + "pyo3-async-runtimes", + "quinn", + "rand", + "rustls", + "rustls-acme", + "rustls-native-certs", + "rustls-pemfile", + "rustls-pki-types", + "rusty-hook", + "serde", + "serde_bytes", + "sha2", + "tokio", + "tokio-rustls", + "tokio-test", + "tokio-tungstenite", + "tokio-util", + "urlencoding", + "winresource", + "yaml-rust2", +] + +[[package]] +name = "ferron-passwd" +version = "1.0.0" +dependencies = [ + "clap", + "mimalloc", + "password-auth", + "rpassword", + "rusty-hook", + "winresource", + "yaml-rust2", +] + +[[package]] +name = "flate2" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fsio" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1fd087255f739f4f1aeea69f11b72f8080e9c2e7645cd06955dad4a178a49e3" + +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = 
"futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-lite" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "futures-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" +dependencies = [ + "futures-io", + "rustls", + "rustls-pki-types", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + 
"futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getopts" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "h2" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.9.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + 
+[[package]] +name = "h3" +version = "0.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dfb059a4f28a66f186ed16ad912d142f490676acba59353831d7cb45a96b0d3" +dependencies = [ + "bytes", + "fastrand", + "futures-util", + "http", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "h3-quinn" +version = "0.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d482318ae94198fc8e3cbb0b7ba3099c865d744e6ec7c62039ca7b6b6c66fbf" +dependencies = [ + "bytes", + "futures", + "h3", + "quinn", + "tokio", + "tokio-util", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "foldhash", +] + +[[package]] +name = "hashlink" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +dependencies = [ + "hashbrown 0.15.2", + "serde", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", 
+ "tokio-rustls", + "tower-service", + "webpki-roots", +] + +[[package]] +name = "hyper-tungstenite" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0110a0487cbc65c3d1f38c2ef851dbf8bee8c2761e5a96be6a59ba84412b4752" +dependencies = [ + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tokio-tungstenite", + "tungstenite", +] + +[[package]] +name = "hyper-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "libc", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = 
"icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + 
"tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +dependencies = [ + "equivalent", + "hashbrown 0.15.2", +] + +[[package]] +name = "indoc" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" + +[[package]] +name = "inout" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "generic-array", +] + +[[package]] +name = "interprocess" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d941b405bd2322993887859a8ee6ac9134945a24ec5ec763a8a962fc64dfec2d" +dependencies = [ + "doctest-file", + "futures-core", + "libc", + "recvmsg", + "tokio", + "widestring", + "windows-sys 0.52.0", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" 
+dependencies = [ + "getrandom 0.3.2", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "jzon" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ab85f84ca42c5ec520e6f3c9966ba1fd62909ce260f8837e248857d2560509" + +[[package]] +name = "konst" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4381b9b00c55f251f2ebe9473aef7c117e96828def1a7cb3bd3f0f903c6894e9" +dependencies = [ + "const_panic", + "konst_kernel", + "typewit", +] + +[[package]] +name = "konst_kernel" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4b1eb7788f3824c629b1116a7a9060d6e898c358ebff59070093d51103dcc3c" +dependencies = [ + "typewit", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.172" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" + +[[package]] +name = "libmimalloc-sys" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec9d6fac27761dabcd4ee73571cdb06b7022dc99089acbe5435691edffaac0f4" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "litemap" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memmem" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a64a92489e2744ce060c349162be1c5f33c6969234104dbd99ddb5feb08b8c15" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "mimalloc" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "995942f432bbb4822a7e9c3faa87a695185b0d09273ba85f097b54f4e458f2af" +dependencies = [ + "libmimalloc-sys", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +dependencies = [ + 
"adler2", +] + +[[package]] +name = "mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + +[[package]] +name = "new_mime_guess" +version = "4.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02a2dfb3559d53e90b709376af1c379462f7fb3085a0177deb73e6ea0d99eff4" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "nias" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab250442c86f1850815b5d268639dff018c0627022bc1940eb2d642ca1ce12f0" + +[[package]] +name = "nix" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537bc3c4a347b87fd52ac6c03a02ab1302962cfd93373c5d7a112cdc337854cc" +dependencies = [ + "bitflags", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = 
"0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "ocsp-stapler" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65b66402187deedd364531a44e5a0cec4abb971016a24b71a0c5fc254b13b6ae" +dependencies = [ + "anyhow", + "arc-swap", + "base64", + "bytes", + "chrono", + "http", + "rasn", + "rasn-ocsp", + "rasn-pkix", + "readme-rustdocifier", + "reqwest", + "rustls", + "sha1", + "tokio", + "tokio-util", + "tracing", + "url", + "x509-parser", +] + +[[package]] +name = "oid-registry" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" +dependencies = [ + "asn1-rs", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = 
"parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "password-auth" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a2a4764cc1f8d961d802af27193c6f4f0124bd0e76e8393cf818e18880f0524" +dependencies = [ + "argon2", + "getrandom 0.2.16", + "password-hash", + "pbkdf2", + "rand_core 0.6.4", + "scrypt", +] + +[[package]] +name = "password-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest", + "hmac", + "password-hash", + "sha2", +] + +[[package]] +name = "pem" +version = "3.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64", + "serde", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" 
+ +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "polling" +version = "3.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi", + "pin-project-lite", + "rustix", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + +[[package]] +name = "postcard" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "170a2601f67cc9dba8edd8c4870b15f71a6a2dc196daec8c83f72b59dff628a8" +dependencies = [ + "cobs", + "embedded-io 0.4.0", + "embedded-io 0.6.1", + "serde", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = 
"pyo3" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5203598f366b11a02b13aa20cab591229ff0a89fd121a308a5df751d5fc9219" +dependencies = [ + "anyhow", + "cfg-if", + "indoc", + "libc", + "memoffset", + "once_cell", + "portable-atomic", + "pyo3-build-config", + "pyo3-ffi", + "pyo3-macros", + "unindent", +] + +[[package]] +name = "pyo3-async-runtimes" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0b83dc42f9d41f50d38180dad65f0c99763b65a3ff2a81bf351dd35a1df8bf" +dependencies = [ + "futures", + "once_cell", + "pin-project-lite", + "pyo3", + "tokio", +] + +[[package]] +name = "pyo3-build-config" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99636d423fa2ca130fa5acde3059308006d46f98caac629418e53f7ebb1e9999" +dependencies = [ + "once_cell", + "target-lexicon", +] + +[[package]] +name = "pyo3-ffi" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78f9cf92ba9c409279bc3305b5409d90db2d2c22392d443a87df3a1adad59e33" +dependencies = [ + "libc", + "pyo3-build-config", +] + +[[package]] +name = "pyo3-macros" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b999cb1a6ce21f9a6b147dcf1be9ffedf02e0043aec74dc390f3007047cecd9" +dependencies = [ + "proc-macro2", + "pyo3-macros-backend", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "pyo3-macros-backend" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "822ece1c7e1012745607d5cf0bcb2874769f0f7cb34c4cde03b9358eb9ef911a" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "pyo3-build-config", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "quinn" +version = "0.11.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" +dependencies = [ + 
"bytes", + "cfg_aliases", + "futures-io", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror 2.0.12", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcbafbbdbb0f638fe3f35f3c56739f77a8a1d070cb25603226c83339b391472b" +dependencies = [ + "bytes", + "getrandom 0.3.2", + "rand", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "rustls-platform-verifier", + "slab", + "thiserror 2.0.12", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +dependencies = [ + "rand_chacha", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + +[[package]] +name = "rasn" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5379b720091e4bf4a9f118eb46f4ffb67bb8b7551649528c89e265cf880e748" +dependencies = [ + "arrayvec", + "bitvec", + "bitvec-nom2", + "bytes", + "chrono", + "either", + "jzon", + "konst", + "nom", + "num-bigint", + "num-integer", + "num-traits", + "once_cell", + "rasn-derive", + "snafu", +] + +[[package]] +name = "rasn-derive" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e521162112419405837a6590b327f24707ce9f9b3ac9c9c4a4d10673b63abcd8" +dependencies = [ + "either", + "itertools 0.10.5", + "proc-macro2", + "quote", + "rayon", + "syn 1.0.109", + "uuid", +] + +[[package]] +name = "rasn-ocsp" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aada75dbe8ab73b0304c47a9b23c8846b93d19113c771d332dba2065c51c5372" +dependencies = [ + "rasn", + "rasn-pkix", +] + +[[package]] +name = "rasn-pkix" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f74a31343c2fd11da94025b8dcbeb96bfb207b4d480db99ad5554c117448fa" +dependencies = [ + "rasn", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "rcgen" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "readme-rustdocifier" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08ad765b21a08b1a8e5cdce052719188a23772bcbefb3c439f0baaf62c56ceac" + +[[package]] +name = "recvmsg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" + +[[package]] +name = "redox_syscall" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "reqwest" +version = "0.12.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +dependencies = [ + 
"base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-rustls", + "hyper-util", + "ipnet", + "js-sys", + "log", + "mime", + "once_cell", + "percent-encoding", + "pin-project-lite", + "quinn", + "rustls", + "rustls-pemfile", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "system-configuration", + "tokio", + "tokio-rustls", + "tower", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "windows-registry", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rpassword" +version = "7.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66d4c8b64f049c6721ec8ccec37ddfc3d641c4a7fca57e8f2a89de509c73df39" +dependencies = [ + "libc", + "rtoolbox", + "windows-sys 0.59.0", +] + +[[package]] +name = "rtoolbox" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7cc970b249fbe527d6e02e0a227762c9108b2f49d81094fe357ffc6d14d7f6f" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls" +version = "0.23.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" +dependencies = [ + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-acme" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230907c587e32543b0b0b4a41db582dd9acd29775862d400dd799904dedcf4f8" +dependencies = [ + "async-io", + "async-trait", + "async-web-client", + "base64", + "blocking", + "chrono", + "futures", + "futures-rustls", + "http", + "log", + "pem", + "rcgen", + "ring", + "serde", + "serde_json", + "thiserror 2.0.12", + "webpki-roots", + "x509-parser", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +dependencies = [ + "web-time", +] + +[[package]] 
+name = "rustls-platform-verifier" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4937d110d34408e9e5ad30ba0b0ca3b6a8a390f8db3636db60144ac4fa792750" +dependencies = [ + "core-foundation 0.10.0", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + +[[package]] +name = "rustls-webpki" +version = "0.103.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" + +[[package]] +name = "rusty-hook" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96cee9be61be7e1cbadd851e58ed7449c29c620f00b23df937cb9cbc04ac21a3" +dependencies = [ + "ci_info", + "getopts", + "nias", + "toml 0.5.11", +] + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "scrypt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" +dependencies = [ + "password-hash", + "pbkdf2", + "salsa20", + "sha2", +] + +[[package]] +name = "security-framework" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +dependencies = [ + "bitflags", + "core-foundation 0.10.0", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_bytes" +version = "0.11.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" +dependencies = [ + "serde", +] + +[[package]] +name = 
"serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +dependencies = [ + "libc", +] + +[[package]] +name = 
"slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" + +[[package]] +name = "snafu" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" +dependencies = [ + "backtrace", + "doc-comment", + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "socket2" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "target-lexicon" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name = "time-macros" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.44.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-test" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" +dependencies = [ + "futures-util", + "log", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tungstenite", +] + +[[package]] +name = "tokio-util" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +dependencies = [ + "bytes", + "futures-core", + "futures-io", + "futures-sink", + "futures-util", + "hashbrown 0.15.2", + "pin-project-lite", + "slab", + "tokio", +] + +[[package]] +name = "toml" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +dependencies = [ + "serde", +] + +[[package]] +name = "toml" +version = "0.8.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +dependencies = [ + "indexmap 2.9.0", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "tracing-core" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tungstenite" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13" +dependencies = [ + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand", + "rustls", + "rustls-pki-types", + "sha1", + "thiserror 2.0.12", + "utf-8", +] + 
+[[package]] +name = "typenum" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "typewit" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb77c29baba9e4d3a6182d51fa75e3215c7fd1dab8f4ea9d107c716878e55fc0" +dependencies = [ + "typewit_proc_macros", +] + +[[package]] +name = "typewit_proc_macros" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36a83ea2b3c704935a01b4642946aadd445cea40b10935e3f8bd8052b8193d6" + +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "unindent" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "urlencoding" +version = "2.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +dependencies = [ + "getrandom 0.3.2", +] + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.101", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "0.26.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180d2741b6115c3d906577e6533ad89472d48d96df00270fccb78233073d77f7" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "webpki-roots" +version = "0.26.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29aad86cec885cafd03e8305fd727c418e970a521322c91688414d5b8efba16b" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "widestring" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "windows-core" +version = "0.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings 0.4.0", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +dependencies = [ + "windows-result", + "windows-strings 0.3.1", + "windows-targets 0.53.0", +] + +[[package]] +name = "windows-result" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" 
+version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = 
"windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cb8234a863ea0e8cd7284fcdd4f145233eb00fee02bbdd9861aec44e6477bc5" +dependencies = [ + "memchr", +] + +[[package]] +name = "winresource" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba4a67c78ee5782c0c1cb41bebc7e12c6e79644daa1650ebbc1de5d5b08593f7" +dependencies = [ + "toml 0.8.22", + "version_check", +] + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + +[[package]] +name = "wyz" +version = "0.5.1" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + +[[package]] +name = "yaml-rust2" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "818913695e83ece1f8d2a1c52d54484b7b46d0f9c06beeb2649b9da50d9b512d" +dependencies = [ + "arraydeque", + "encoding_rs", + "hashlink", +] + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.15+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..1ee49dcc4f4a2962ce51f6729362bc8afd28a47c --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,18 @@ +[workspace] +members = [ + "ferron", + "ferron-passwd", +] +resolver = "2" + +[workspace.dependencies] +yaml-rust2 = "0.10.0" +password-auth = { version = "1.0.0", features = ["argon2", "pbkdf2", "scrypt"] } +rusty-hook = "0.11.2" +mimalloc = "0.1.45" + +[profile.release] +strip = true +lto = true +codegen-units = 1 +panic = "abort" diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..e359e43f6e8101c825816dd026faeed2d2ea7f15 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,40 @@ +# Use the official Rust image as a build stage +FROM rust as builder + +# Set the working directory +WORKDIR /usr/src/ferron + +# Copy the source code +COPY . . 
+ +# Build the actual application +RUN cargo build --release + +# Use a Devuan base image for the final image +FROM devuan/devuan + +# Copy the compiled binaries from the builder stage +COPY --from=builder /usr/src/ferron/target/release/ferron /usr/sbin/ferron +COPY --from=builder /usr/src/ferron/target/release/ferron-passwd /usr/sbin/ferron-passwd + +# Copy the web server configuration +COPY ferron-docker.yaml /etc/ferron.yaml + +# Copy the web root contents +RUN mkdir -p /var/www/ferron +COPY wwwroot/* /var/www/ferron + +# Create a directory where Ferron logs are stored +RUN mkdir -p /var/log/ferron + +# Create a "ferron" user and grant the permissions for the log directory and the webroot to that user +RUN useradd -d /nonexistent -s /usr/sbin/nologin -r ferron && chown -hR ferron:ferron /var/www/ferron && chown -hR ferron:ferron /var/log/ferron + +# Expose the port 80 (used for HTTP) +EXPOSE 80 + +# Switch to "ferron" user +USER ferron + +# Set the command to run the binary +CMD ["/usr/sbin/ferron", "-c", "/etc/ferron.yaml"] \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ffe57a668bb275cb82f9a7a86225ac453873595e --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Ferron + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 673a88d0c3bf738d848d61b25723f03f2a9333b6..aa60a962f77b35dfa0afc225997dabdd3f8e9bd0 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,64 @@ ---- -title: Web -emoji: âš¡ -colorFrom: yellow -colorTo: pink -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +

+ + + + Ferron logo + + +

+

+ Ferron - a fast, memory-safe web server written in Rust +

+

+ Static Badge + Website + X (formerly Twitter) Follow + Docker Pulls + GitHub Repo stars +

+ +* * * + +## Features + +- **High performance** - built with Rust’s async capabilities for optimal speed. +- **Memory-safe** - built with Rust, which is a programming language offering memory safety. +- **Extensibility** - modular architecture for easy customization. +- **Secure** - focus on robust security practices and safe concurrency. + +## Components + +Ferron consists of multiple components: + +- **`ferron`**: The main web server. +- **`ferron-passwd`**: A tool for generating user entries with hashed passwords, which can be copied into the web server's configuration file. + +## Building Ferron from source + +You can clone the repository and explore the existing code: + +```sh +git clone https://github.com/ferronweb/ferron.git +cd ferron +``` + +You can then build and run the web server using Cargo: + +```sh +cargo build -r +cargo run -r --bin ferron +``` + +You can also use [Ferron Forge](https://github.com/ferronweb/ferron-forge) to build the web server. Ferron Forge outputs a ZIP archive that can be used by the Ferron installer. + +## Server configuration + +You can check the [Ferron documentation](https://www.ferronweb.org/docs/configuration) to see configuration properties used by Ferron. + +## Contributing + +See [Ferron contribution page](https://www.ferronweb.org/contribute) for details. + +## License + +Ferron is licensed under the MIT License. See `LICENSE` for details. \ No newline at end of file diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..d4ec678f9c708b43f39cf519773699ccf989b7b3 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,40 @@ +# Ferron Security Policy + +## Overview +Ferron is a fast, memory-safe web server written in Rust, designed for performance and security. This document outlines the security policies and procedures to ensure Ferron remains a secure and reliable software project. 
+ +## Supported versions +Ferron actively supports the latest stable release and provides security updates for the most recent minor versions. Users are encouraged to upgrade promptly to receive security patches. + +## Reporting security issues +Security is a top priority for Ferron. If you discover a vulnerability, please report it responsibly by sending an email message to [security@ferronweb.org](mailto:security@ferronweb.org). + +We strongly discourage public disclosure of vulnerabilities before a fix is released. + +## Security best practices +To maintain security, we follow these principles: + +- **Memory safety** - Ferron leverages Rust’s ownership model and borrow checker to eliminate memory-related vulnerabilities. +- **Minimal attack surface** - features are enabled only as needed, reducing exposure to potential threats. +- **Regular audits** - code is reviewed regularly, and dependencies are monitored for security vulnerabilities. +- **Safe defaults** - Ferron has some insecure configuration disabled by default, like exposing the server version or directory listings. + +## Secure development process +Ferron follows industry best practices to maintain a secure development lifecycle: + +1. **Code review** - all changes undergo peer review with security checks. +2. **Dependency management** - regularly check and update dependencies to patch known vulnerabilities. +3. **Responsible disclosure** - work with the security community to resolve issues before public disclosure. + +## Handling security incidents +In the event of a security breach or vulnerability: + +1. **Triage** - assess and prioritize the issue based on severity. +2. **Mitigation** - develop and test a fix. +3. **Advisory** - issue a security advisory with mitigation steps and fixed versions. +4. **Update users** - notify users via release notes and security mailing lists. + +## Contact information +For any security concerns, contact us at [security@ferronweb.org](mailto:security@ferronweb.org). 
Stay updated on security patches via [our website](https://www.ferronweb.org). + +By following this policy, we ensure Ferron remains a secure and trustworthy web server for all users. \ No newline at end of file diff --git a/ferron-docker.yaml b/ferron-docker.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b4df9f3f52e3fe2afeb80f383da4d3ee14d970f --- /dev/null +++ b/ferron-docker.yaml @@ -0,0 +1,4 @@ +global: + wwwroot: /var/www/ferron + logFilePath: /var/log/ferron/access.log + errorLogFilePath: /var/log/ferron/error.log \ No newline at end of file diff --git a/ferron-passwd/Cargo.toml b/ferron-passwd/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..766be9654f064807a59d03f70614b8d649a8cede --- /dev/null +++ b/ferron-passwd/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "ferron-passwd" +version = "1.0.0" +edition = "2021" + +[package.metadata.winresource] +ProductName = "Ferron password utility" + +[dependencies] +clap = { version = "4.5.28", features = ["derive"] } +password-auth.workspace = true +rpassword = "7.4.0" +yaml-rust2.workspace = true +mimalloc = { workspace = true } + +[dev-dependencies] +rusty-hook = { workspace = true } + +[build-dependencies] +winresource = "0.1.19" diff --git a/ferron-passwd/assets/icon.ico b/ferron-passwd/assets/icon.ico new file mode 100644 index 0000000000000000000000000000000000000000..1377636387d6dd051924ecd77cb8a49eb8e2064e --- /dev/null +++ b/ferron-passwd/assets/icon.ico @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea3ac3c25f884b8c4891412ff035851d2eff097e518c8652fc99fe114205951b +size 119437 diff --git a/ferron-passwd/build.rs b/ferron-passwd/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..c7aef82c98cb5cbded2b945e691e136700413466 --- /dev/null +++ b/ferron-passwd/build.rs @@ -0,0 +1,13 @@ +use { + std::{env, io}, + winresource::WindowsResource, +}; + +fn main() -> io::Result<()> { + if 
env::var_os("CARGO_CFG_WINDOWS").is_some() { + WindowsResource::new() + .set_icon("assets/icon.ico") + .compile()?; + } + Ok(()) +} diff --git a/ferron-passwd/src/main.rs b/ferron-passwd/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..b5ecbaf7921988809fa2f7f8a8eccc8885165c86 --- /dev/null +++ b/ferron-passwd/src/main.rs @@ -0,0 +1,65 @@ +use clap::Parser; +use mimalloc::MiMalloc; +use password_auth::generate_hash; +use rpassword::prompt_password; +use std::process; +use yaml_rust2::{yaml, Yaml, YamlEmitter}; + +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; + +/// A password tool for Ferron +#[derive(Parser, Debug)] +#[command(version, about, long_about = None)] +struct Args { + /// The username, for which you want to generate an user entry + #[arg()] + username: String, +} + +fn main() { + let args = Args::parse(); + + let password = match prompt_password("Password: ") { + Ok(pass) => pass, + Err(e) => { + eprintln!("Error reading password: {}", e); + process::exit(1); + } + }; + let password2 = match prompt_password("Confirm password: ") { + Ok(pass) => pass, + Err(e) => { + eprintln!("Error reading password confirmation: {}", e); + process::exit(1); + } + }; + + if password != password2 { + eprintln!("Passwords don't match!"); + process::exit(1); + } + + let password_hash = generate_hash(password); + + let mut yaml_user_hashmap = yaml::Hash::new(); + yaml_user_hashmap.insert( + Yaml::String("name".to_string()), + Yaml::String(args.username), + ); + yaml_user_hashmap.insert( + Yaml::String("pass".to_string()), + Yaml::String(password_hash), + ); + + let yaml_data = Yaml::Array(vec![Yaml::Hash(yaml_user_hashmap)]); + + let mut output = String::new(); + if let Err(e) = YamlEmitter::new(&mut output).dump(&yaml_data) { + eprintln!("Error generating YAML output: {}", e); + process::exit(1); + } + + println!("Copy the user object below into \"users\" property of either global configuration or a virtual host in the 
\"ferron.yaml\" file. Remember about the indentation in the server configuration."); + println!("{}", output); +} diff --git a/ferron-release.yaml b/ferron-release.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1625463ca58e93fad0285e7bcf2f035745a8fea9 --- /dev/null +++ b/ferron-release.yaml @@ -0,0 +1,2 @@ +global: + wwwroot: wwwroot \ No newline at end of file diff --git a/ferron.yaml b/ferron.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d7d2617f6fed0668dd1b30f95aa10dffad95d62 --- /dev/null +++ b/ferron.yaml @@ -0,0 +1,11 @@ +# Global server configuration +global: + port: 8080 + logFilePath: access.log + errorLogFilePath: error.log + loadModules: + - cgi + wwwroot: wwwroot # Replace "wwwroot" with desired webroot + enableDirectoryListing: true + cgiScriptExtensions: + - .php # Necessary to be able to execute PHP scripts via PHP-CGI \ No newline at end of file diff --git a/ferron/Cargo.toml b/ferron/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..7cbb056026c3205506221a81a9ebaee77fa3230e --- /dev/null +++ b/ferron/Cargo.toml @@ -0,0 +1,80 @@ +[package] +name = "ferron" +version = "1.0.0" +edition = "2021" + +[package.metadata.winresource] +ProductName = "Ferron" + +[dependencies] +hyper = { version = "1.6.0", features = ["full"] } +tokio = { version = "1.44.2", features = ["full"] } +http-body-util = "0.1.0" +hyper-util = { version = "0.1", features = ["full"] } +tokio-util = { version = "0.7.13", features = ["io"] } +rustls = { version = "0.23.24", default-features = false, features = ["tls12", "std", "ring"] } +rustls-acme = { version = "0.13.0", default-features = false, features = ["tls12", "ring"] } +tokio-rustls = { version = "0.26.1", default-features = false, features = ["tls12", "ring"] } +rustls-pki-types = "1.11.0" +rustls-pemfile = "2.2.0" +yaml-rust2 = { workspace = true } +anyhow = "1.0.98" +futures-util = "0.3.31" +chrono = "0.4.39" +async-trait = "0.1.86" 
+rustls-native-certs = "0.8.1" +ocsp-stapler = { version = "0.4.4", default-features = false } +clap = { version = "4.5.28", features = ["derive"] } +fancy-regex = "0.14.0" +password-auth = { workspace = true } +base64 = "0.22.1" +sha2 = "0.10.8" +new_mime_guess = "4.0.4" +async-compression = { version = "0.4.18", features = ["tokio", "gzip", "brotli", "deflate", "zstd"] } +urlencoding = "2.1.3" +async-channel = "2.3.1" +mimalloc = { workspace = true } +cache_control = { git = "https://github.com/DorianNiemiecSVRJS/rust-cache-control.git", optional = true } # Temporarily replaced with a fork +itertools = { version = "0.14.0", optional = true } +rand = "0.9.0" +memmem = { version = "0.1.1", optional = true } +httparse = { version = "1.10.0", optional = true } +pin-project-lite = "0.2.16" +hashlink = "0.10.0" +glob = "0.3.2" +hyper-tungstenite = "0.17.0" +tokio-tungstenite = { version = "0.26.2", features = ["rustls-tls-native-roots"] } +http = "1.2.0" +pyo3 = { version = "0.24.1", optional = true, features = ["anyhow", "auto-initialize"] } +futures-lite = "2.6.0" +nix = { version = "0.30.0", optional = true, features = ["process", "signal"] } +interprocess = { version = "2.2.3", features = ["tokio"], optional = true } +serde = { version = "1.0.219", optional = true, features = ["derive"] } +serde_bytes = { version = "0.11.17", optional = true } +postcard = { version = "1.1.1", optional = true, default-features = false, features = ["use-std"] } +bytes = { version = "1.10.1", optional = true } +pyo3-async-runtimes = { version = "0.24.0", optional = true, features = ["tokio", "tokio-runtime"] } +h3 = "0.0.7" +h3-quinn = "0.0.9" +quinn = "0.11.7" + +[dev-dependencies] +tokio-test = "0.4.4" +rusty-hook = { workspace = true } + +[features] +default = ["cache", "cgi", "fauth", "fcgi", "fproxy", "rproxy", "scgi"] +asgi = ["pyo3", "pyo3-async-runtimes"] +cache = ["cache_control", "itertools"] +cgi = ["httparse", "memmem"] +example = [] +fauth = [] +fcgi = ["httparse", 
"memmem"] +fproxy = [] +rproxy = [] +scgi = ["httparse", "memmem"] +wsgi = ["pyo3"] +wsgid = ["pyo3", "nix", "interprocess", "itertools", "serde", "serde_bytes", "postcard", "hashlink/serde", "hashlink/serde_impl", "bytes"] + +[build-dependencies] +winresource = "0.1.19" diff --git a/ferron/assets/icon.ico b/ferron/assets/icon.ico new file mode 100644 index 0000000000000000000000000000000000000000..b7cda702df2719c7163b2f56a6aa2eeb76f06519 --- /dev/null +++ b/ferron/assets/icon.ico @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d013886361c7a885c9ab6d82455b316e68515030d3fe8dee7d970eb0950e4a0a +size 112877 diff --git a/ferron/build.rs b/ferron/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..c7aef82c98cb5cbded2b945e691e136700413466 --- /dev/null +++ b/ferron/build.rs @@ -0,0 +1,13 @@ +use { + std::{env, io}, + winresource::WindowsResource, +}; + +fn main() -> io::Result<()> { + if env::var_os("CARGO_CFG_WINDOWS").is_some() { + WindowsResource::new() + .set_icon("assets/icon.ico") + .compile()?; + } + Ok(()) +} diff --git a/ferron/src/common/log.rs b/ferron/src/common/log.rs new file mode 100644 index 0000000000000000000000000000000000000000..ebd83788c381ea3dc907c68bd38369f5a0cab728 --- /dev/null +++ b/ferron/src/common/log.rs @@ -0,0 +1,32 @@ +/// Represents a log message with its content and error status. +pub struct LogMessage { + is_error: bool, + message: String, +} + +impl LogMessage { + /// Creates a new `LogMessage` instance. + /// + /// # Parameters + /// + /// - `message`: The content of the log message. + /// - `is_error`: A boolean indicating whether the message is an error (`true`) or not (`false`). + /// + /// # Returns + /// + /// A `LogMessage` object containing the specified message and error status. + pub fn new(message: String, is_error: bool) -> Self { + Self { is_error, message } + } + + /// Consumes the `LogMessage` and returns its components. 
+ /// + /// # Returns + /// + /// A tuple containing: + /// - `String`: The content of the log message. + /// - `bool`: A boolean indicating whether the message is an error. + pub fn get_message(self) -> (String, bool) { + (self.message, self.is_error) + } +} diff --git a/ferron/src/common/mod.rs b/ferron/src/common/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..a8bd664b15476f9fa47299bfe0069dc66d51a0eb --- /dev/null +++ b/ferron/src/common/mod.rs @@ -0,0 +1,593 @@ +#![allow(dead_code)] + +use std::{error::Error, future::Future, net::SocketAddr, pin::Pin}; + +use async_channel::Sender; +use async_trait::async_trait; +use http_body_util::combinators::BoxBody; +use hyper::{body::Bytes, upgrade::Upgraded, HeaderMap, Request, Response, StatusCode, Uri}; +use hyper_tungstenite::HyperWebsocket; +use tokio::runtime::Handle; +use yaml_rust2::Yaml; + +#[path = "log.rs"] +mod log; +#[path = "with_runtime.rs"] +mod with_runtime; + +/// Contains information about a network socket, including remote and local addresses, +/// and whether the connection is encrypted. +pub struct SocketData { + /// The remote address of the socket. + pub remote_addr: SocketAddr, + /// The local address of the socket. + pub local_addr: SocketAddr, + /// Indicates if the connection is encrypted. + pub encrypted: bool, +} + +impl SocketData { + /// Creates a new `SocketData` instance. + /// + /// # Parameters + /// + /// - `remote_addr`: The remote address of the socket. + /// - `local_addr`: The local address of the socket. + /// - `encrypted`: A boolean indicating if the connection is encrypted. + /// + /// # Returns + /// + /// A new `SocketData` instance with the provided parameters. + pub fn new(remote_addr: SocketAddr, local_addr: SocketAddr, encrypted: bool) -> Self { + Self { + remote_addr, + local_addr, + encrypted, + } + } +} + +/// Represents a log message. This is a type alias for `crate::log::LogMessage`. 
+pub type LogMessage = log::LogMessage; + +/// Represents the server configuration object. This is a type alias for `Yaml` from the `yaml_rust2` crate. +pub type ServerConfig = Yaml; + +/// Represents the HTTP request from Hyper. +pub type HyperRequest = Request>; + +/// Represents the HTTP response from Hyper. +pub type HyperResponse = Response>; + +/// Represents the upgraded HTTP connection from Hyper. +pub type HyperUpgraded = Upgraded; + +/// A wrapper that ensures a function is executed within a specific runtime context. +/// This is a type alias for `crate::with_runtime::WithRuntime`. +pub type WithRuntime = with_runtime::WithRuntime; + +/// Contains data related to an HTTP request, including the original Hyper request +/// and optional authentication user information. +pub struct RequestData { + hyper_request: HyperRequest, + auth_user: Option, + original_url: Option, +} + +impl RequestData { + /// Creates a new `RequestData` instance. + /// + /// # Parameters + /// + /// - `hyper_request`: The original Hyper `Request` object. + /// - `auth_user`: An optional string representing the authenticated user. + /// + /// # Returns + /// + /// A new `RequestData` instance with the provided parameters. + pub fn new( + hyper_request: HyperRequest, + auth_user: Option, + original_url: Option, + ) -> Self { + Self { + hyper_request, + auth_user, + original_url, + } + } + + /// Sets the authenticated user for the request. + /// + /// # Parameters + /// + /// - `auth_user`: A string representing the authenticated user. + pub fn set_auth_user(&mut self, auth_user: String) { + self.auth_user = Some(auth_user); + } + + /// Retrieves the authenticated user associated with the request, if any. + /// + /// # Returns + /// + /// An `Option` containing a reference to the authenticated user's string, or `None` if not set. 
+ pub fn get_auth_user(&self) -> Option<&str> { + match &self.auth_user { + Some(auth_user) => Some(auth_user), + None => None, + } + } + + /// Sets the original URL (before URL rewriting) for the request. + /// + /// # Parameters + /// + /// - `original_url`: An `Uri` object representing the original request URL before rewriting. + pub fn set_original_url(&mut self, original_url: Uri) { + self.original_url = Some(original_url); + } + + /// Retrieves the original URL (before URL rewriting) associated with the request, if any. + /// + /// # Returns + /// + /// An `Option` containing a reference to the `Uri` object representing the original request URL before rewriting, or `None` if not set. + pub fn get_original_url(&self) -> Option<&Uri> { + match &self.original_url { + Some(original_url) => Some(original_url), + None => None, + } + } + + /// Provides a reference to the underlying Hyper `Request` object. + /// + /// # Returns + /// + /// A reference to the `HyperRequest` object. + pub fn get_hyper_request(&self) -> &HyperRequest { + &self.hyper_request + } + + /// Provides a mutable reference to the underlying Hyper `Request` object. + /// + /// # Returns + /// + /// A mutable reference to the `HyperRequest` object. + pub fn get_mut_hyper_request(&mut self) -> &mut HyperRequest { + &mut self.hyper_request + } + + /// Consumes the `RequestData` instance and returns its components. + /// + /// # Returns + /// + /// A tuple containing the `HyperRequest` object, an optional authenticated user string, and an optional `Uri` object representing the original request URL before rewriting. + pub fn into_parts(self) -> (HyperRequest, Option, Option) { + (self.hyper_request, self.auth_user, self.original_url) + } +} + +/// Facilitates logging of error messages through a provided logger sender. +pub struct ErrorLogger { + logger: Option>, +} + +impl ErrorLogger { + /// Creates a new `ErrorLogger` instance. 
+ /// + /// # Parameters + /// + /// - `logger`: A `Sender` used for sending log messages. + /// + /// # Returns + /// + /// A new `ErrorLogger` instance associated with the provided logger. + pub fn new(logger: Sender) -> Self { + Self { + logger: Some(logger), + } + } + + /// Creates a new `ErrorLogger` instance without any underlying logger. + /// + /// # Returns + /// + /// A new `ErrorLogger` instance not associated with any logger. + pub fn without_logger() -> Self { + Self { logger: None } + } + + /// Logs an error message asynchronously. + /// + /// # Parameters + /// + /// - `message`: A string slice containing the error message to be logged. + /// + /// # Examples + /// + /// ``` + /// # use crate::ferron_common::ErrorLogger; + /// # #[tokio::main] + /// # async fn main() { + /// let (tx, mut rx) = async_channel::bounded(100); + /// let logger = ErrorLogger::new(tx); + /// logger.log("An error occurred").await; + /// # } + /// ``` + pub async fn log(&self, message: &str) { + if let Some(logger) = &self.logger { + logger + .send(LogMessage::new(String::from(message), true)) + .await + .unwrap_or_default(); + } + } +} + +impl Clone for ErrorLogger { + /// Clone a `ErrorLogger`. + /// + /// # Returns + /// + /// A cloned `ErrorLogger` instance + fn clone(&self) -> Self { + Self { + logger: self.logger.clone(), + } + } +} + +/// Holds data related to an HTTP response, including the original request, +/// optional authentication user information, and the response details. +pub struct ResponseData { + request: Option, + auth_user: Option, + original_url: Option, + response: Option>>, + response_status: Option, + response_headers: Option, + new_remote_address: Option, + parallel_fn: Option + Send>>>, +} + +impl ResponseData { + /// Initiates the building process for a `ResponseData` instance using a `RequestData` object. + /// + /// # Parameters + /// + /// - `request`: A `RequestData` instance containing the original request and authentication information. 
+ /// + /// # Returns + /// + /// A `ResponseDataBuilder` initialized with the provided request data. + pub fn builder(request: RequestData) -> ResponseDataBuilder { + let (request, auth_user, original_url) = request.into_parts(); + + ResponseDataBuilder { + request: Some(request), + auth_user, + original_url, + response: None, + response_status: None, + response_headers: None, + new_remote_address: None, + parallel_fn: None, + } + } + + /// Initiates the building process for a `ResponseData` instance without a `RequestData` object. + /// + /// # Returns + /// + /// A `ResponseDataBuilder` initialized without any request data. + pub fn builder_without_request() -> ResponseDataBuilder { + ResponseDataBuilder { + request: None, + auth_user: None, + original_url: None, + response: None, + response_status: None, + response_headers: None, + new_remote_address: None, + parallel_fn: None, + } + } + + /// Consumes the `ResponseData` instance and returns its components. + /// + /// # Returns + /// + /// A tuple containing: + /// - The optional original `HyperRequest` object. + /// - An optional authenticated user string. + /// - An optional `Uri` object representing the original request URL (before rewriting) + /// - An optional `Response` object encapsulated in a `BoxBody` with `Bytes` and `std::io::Error`. + /// - An optional HTTP `StatusCode`. + /// - An optional `HeaderMap` containing the HTTP headers. + /// - An optional `SocketAddr` containing the client's new IP address and port. + /// - An optional `Future` with `()` output that would be executed in parallel. 
+ #[allow(clippy::type_complexity)] + pub fn into_parts( + self, + ) -> ( + Option, + Option, + Option, + Option>>, + Option, + Option, + Option, + Option + Send>>>, + ) { + ( + self.request, + self.auth_user, + self.original_url, + self.response, + self.response_status, + self.response_headers, + self.new_remote_address, + self.parallel_fn, + ) + } +} + +pub struct ResponseDataBuilder { + request: Option, + auth_user: Option, + original_url: Option, + response: Option>>, + response_status: Option, + response_headers: Option, + new_remote_address: Option, + parallel_fn: Option + Send>>>, +} + +impl ResponseDataBuilder { + /// Sets the response for the `ResponseData`. + /// + /// # Parameters + /// + /// - `response`: A `Response` object encapsulated in a `BoxBody` with `Bytes` and `std::io::Error`. + /// + /// # Returns + /// + /// The updated `ResponseDataBuilder` instance with the specified response. + pub fn response(mut self, response: Response>) -> Self { + self.response = Some(response); + self + } + + /// Sets the status code for the `ResponseData`. + /// + /// # Parameters + /// + /// - `status`: A `StatusCode` representing the HTTP status code. + /// + /// # Returns + /// + /// The updated `ResponseDataBuilder` instance with the specified status code. + pub fn status(mut self, status: StatusCode) -> Self { + self.response_status = Some(status); + self + } + + /// Sets the headers for the `ResponseData`. + /// + /// # Parameters + /// + /// - `headers`: A `HeaderMap` containing the HTTP headers. + /// + /// # Returns + /// + /// The updated `ResponseDataBuilder` instance with the specified headers. + pub fn headers(mut self, headers: HeaderMap) -> Self { + self.response_headers = Some(headers); + self + } + + /// Sets the new client address for the `ResponseData`. + /// + /// # Parameters + /// + /// - `new_remote_address`: A `SocketAddr` containing the new client's IP address and port. 
+ /// + /// # Returns + /// + /// The updated `ResponseDataBuilder` instance with the specified headers. + pub fn new_remote_address(mut self, new_remote_address: SocketAddr) -> Self { + self.new_remote_address = Some(new_remote_address); + self + } + + /// Sets the function to be executed in parallel. + /// + /// # Parameters + /// + /// - `parallel_fn`: A `Future` with `()` output. + /// + /// # Returns + /// + /// The updated `ResponseDataBuilder` instance with the specified function to be executed in parallel. + pub fn parallel_fn(mut self, parallel_fn: impl Future + Send + 'static) -> Self { + self.parallel_fn = Some(Box::pin(parallel_fn)); + self + } + + /// Builds the `ResponseData` instance. + /// + /// # Returns + /// + /// A `ResponseData` object containing the accumulated data from the builder. + pub fn build(self) -> ResponseData { + ResponseData { + request: self.request, + auth_user: self.auth_user, + original_url: self.original_url, + response: self.response, + response_status: self.response_status, + response_headers: self.response_headers, + new_remote_address: self.new_remote_address, + parallel_fn: self.parallel_fn, + } + } +} + +/// Defines the interface for server module handlers, specifying how requests should be processed. +#[async_trait] +pub trait ServerModuleHandlers { + /// Handles an incoming request. + /// + /// # Parameters + /// + /// - `request`: A `RequestData` object containing the incoming request and associated data. + /// - `config`: A reference to the combined server configuration (`ServerConfig`). The combined configuration has properties in its root. + /// - `socket_data`: A reference to the `SocketData` containing socket-related information. + /// - `error_logger`: A reference to an `ErrorLogger` for logging errors. + /// + /// # Returns + /// + /// A `Result` containing a `ResponseData` object upon success, or a boxed `dyn Error` if an error occurs. 
+ async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result>; + + /// Handles an incoming forward proxy request (not using CONNECT method). + /// + /// # Parameters + /// + /// - `request`: A `RequestData` object containing the incoming request and associated data. + /// - `config`: A reference to the combined server configuration (`ServerConfig`). The combined configuration has properties in its root. + /// - `socket_data`: A reference to the `SocketData` containing socket-related information. + /// - `error_logger`: A reference to an `ErrorLogger` for logging errors. + /// + /// # Returns + /// + /// A `Result` containing a `ResponseData` object upon success, or a boxed `dyn Error` if an error occurs. + async fn proxy_request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result>; + + /// Modifies an outgoing response before it is sent to the client. + /// + /// This function allows for inspection and modification of the response generated by the server + /// or other handlers. Implementers can use this to add, remove, or alter headers, change the + /// status code, or modify the body of the response as needed. + /// + /// # Parameters + /// + /// - `response`: A `HyperResponse` object representing the outgoing HTTP response. + /// + /// # Returns + /// + /// A `Result` containing the potentially modified `HyperResponse` object upon success, or a boxed + /// `dyn Error` if an error occurs during processing. + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result>; + + /// Modifies an outgoing response for forward proxy requests (not using CONNECT method) before it is sent to the client. + /// + /// This function allows for inspection and modification of the response generated by the server + /// or other handlers. 
Implementers can use this to add, remove, or alter headers, change the + /// status code, or modify the body of the response as needed. + /// + /// # Parameters + /// + /// - `response`: A `HyperResponse` object representing the outgoing HTTP response. + /// + /// # Returns + /// + /// A `Result` containing the potentially modified `HyperResponse` object upon success, or a boxed + /// `dyn Error` if an error occurs during processing. + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result>; + + /// Handles an incoming forward proxy request (using CONNECT method). + /// + /// # Parameters + /// + /// - `upgraded_request`: A `HyperUpgraded` object containing the upgraded HTTP connection. + /// - `connect_address`: A reference to a string containing the address and port number of the destination server (for example "example.com:443"). + /// - `config`: A reference to the combined server configuration (`ServerConfig`). The combined configuration has properties in its root. + /// - `socket_data`: A reference to the `SocketData` containing socket-related information. + /// - `error_logger`: A reference to an `ErrorLogger` for logging errors. + /// + /// # Returns + /// + /// A `Result` containing an empty value upon success, or a boxed `dyn Error` if an error occurs. + async fn connect_proxy_request_handler( + &mut self, + upgraded_request: HyperUpgraded, + connect_address: &str, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result<(), Box>; + + /// Checks if the module is a forward proxy module utilizing CONNECT method. + /// + /// # Returns + /// + /// `true` if the module is a forward proxy module utlilzing CONNECT method, or `false` otherwise. + fn does_connect_proxy_requests(&mut self) -> bool; + + /// Handles an incoming WebSocket request. + /// + /// # Parameters + /// + /// - `websocket`: A `HyperWebsocket` object containing a future that resolves to a WebSocket stream. 
+ /// - `uri`: A `hyper::Uri` object containig the HTTP request URI. + /// - `config`: A reference to the combined server configuration (`ServerConfig`). The combined configuration has properties in its root. + /// - `socket_data`: A reference to the `SocketData` containing socket-related information. + /// - `error_logger`: A reference to an `ErrorLogger` for logging errors. + /// + /// # Returns + /// + /// A `Result` containing an empty value upon success, or a boxed `dyn Error` if an error occurs. + async fn websocket_request_handler( + &mut self, + websocket: HyperWebsocket, + uri: &hyper::Uri, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result<(), Box>; + + /// Checks if the module is a module supporting WebSocket requests. + /// + /// # Parameters + /// + /// - `config`: A reference to the combined server configuration (`ServerConfig`). The combined configuration has properties in its root. + /// - `socket_data`: A reference to the `SocketData` containing socket-related information. + /// + /// # Returns + /// + /// `true` if the module is a module supporting WebSocket requests, or `false` otherwise. + fn does_websocket_requests(&mut self, config: &ServerConfig, socket_data: &SocketData) -> bool; +} + +/// Represents a server module that can provide handlers for processing requests. +pub trait ServerModule { + /// Retrieves the handlers associated with the server module. + /// + /// # Parameters + /// + /// - `handle`: A `Handle` to the Tokio runtime. + /// + /// # Returns + /// + /// A boxed object implementing `ServerModuleHandlers` that can be sent across threads. 
+ fn get_handlers(&self, handle: Handle) -> Box; +} diff --git a/ferron/src/common/with_runtime.rs b/ferron/src/common/with_runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..18bc23ccfaab711f1d04e1ec813fda1de2ca1e37 --- /dev/null +++ b/ferron/src/common/with_runtime.rs @@ -0,0 +1,56 @@ +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tokio::runtime::Handle; + +pin_project! { + /// A future that executes within a specific Tokio runtime. + /// + /// This struct ensures that the wrapped future (`fut`) is polled within the context of the provided Tokio runtime handle (`runtime`). + pub struct WithRuntime { + runtime: Handle, + #[pin] + fut: F, + } +} + +impl WithRuntime { + /// Creates a new `WithRuntime` instance. + /// + /// # Parameters + /// + /// - `runtime`: A `Handle` to the Tokio runtime in which the future should be executed. + /// - `fut`: The future to be executed within the specified runtime. + /// + /// # Returns + /// + /// A `WithRuntime` object encapsulating the provided runtime handle and future. + pub fn new(runtime: Handle, fut: F) -> Self { + Self { runtime, fut } + } +} + +impl Future for WithRuntime +where + F: Future, +{ + type Output = F::Output; + + /// Polls the wrapped future within the context of the specified Tokio runtime. + /// + /// # Parameters + /// + /// - `ctx`: The current task context. + /// + /// # Returns + /// + /// A `Poll` indicating the state of the wrapped future (`Pending` or `Ready`). 
+ fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { + let this = self.project(); + let _guard = this.runtime.enter(); + this.fut.poll(ctx) + } +} diff --git a/ferron/src/main.rs b/ferron/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..733e83e39d5ec78bbb02ce3c033bf544031e3669 --- /dev/null +++ b/ferron/src/main.rs @@ -0,0 +1,478 @@ +// Import server module from "server.rs" +#[path = "server.rs"] +mod ferron_server; + +// Import request handler module from "request_handler.rs" +#[path = "request_handler.rs"] +mod ferron_request_handler; + +// Import resources from "res" directory +#[path = "res"] +mod ferron_res { + pub mod server_software; +} + +// Import common modules from "common" directory +#[path = "common/mod.rs"] +mod ferron_common; + +// Import utility modules from "util" directory +#[path = "util"] +mod ferron_util { + pub mod anti_xss; + #[cfg(feature = "asgi")] + pub mod asgi_messages; + #[cfg(feature = "asgi")] + pub mod asgi_structs; + #[cfg(any(feature = "cgi", feature = "scgi", feature = "fcgi"))] + pub mod cgi_response; + pub mod combine_config; + #[cfg(any(feature = "cgi", feature = "scgi", feature = "fcgi"))] + pub mod copy_move; + pub mod error_pages; + #[cfg(feature = "fcgi")] + pub mod fcgi_decoder; + #[cfg(feature = "fcgi")] + pub mod fcgi_encoder; + #[cfg(feature = "fcgi")] + pub mod fcgi_name_value_pair; + #[cfg(feature = "fcgi")] + pub mod fcgi_record; + pub mod generate_directory_listing; + pub mod ip_blocklist; + pub mod ip_match; + pub mod load_config; + pub mod load_tls; + pub mod match_hostname; + pub mod match_location; + #[cfg(any(feature = "rproxy", feature = "fauth"))] + pub mod no_server_verifier; + pub mod non_standard_code_structs; + #[cfg(all(unix, feature = "wsgid"))] + pub mod preforked_process_pool; + #[cfg(feature = "fcgi")] + pub mod read_to_end_move; + pub mod sizify; + pub mod sni; + #[cfg(feature = "fcgi")] + pub mod split_stream_by_map; + pub mod ttl_cache; + pub mod 
url_rewrite_structs; + pub mod url_sanitizer; + pub mod validate_config; + #[cfg(feature = "wsgi")] + pub mod wsgi_error_stream; + #[cfg(feature = "wsgi")] + pub mod wsgi_input_stream; + #[cfg(any(feature = "wsgi", feature = "wsgid"))] + pub mod wsgi_load_application; + #[cfg(feature = "wsgi")] + pub mod wsgi_structs; + #[cfg(feature = "wsgid")] + pub mod wsgid_body_reader; + #[cfg(feature = "wsgid")] + pub mod wsgid_error_stream; + #[cfg(feature = "wsgid")] + pub mod wsgid_input_stream; + #[cfg(feature = "wsgid")] + pub mod wsgid_message_structs; + #[cfg(feature = "wsgid")] + pub mod wsgid_structs; +} + +// Import project modules from "modules" directory +#[path = "modules"] +mod ferron_modules { + pub mod blocklist; + pub mod default_handler_checks; + pub mod non_standard_codes; + pub mod redirect_trailing_slashes; + pub mod redirects; + pub mod static_file_serving; + pub mod url_rewrite; + pub mod x_forwarded_for; +} + +// Import optional project modules from "modules" directory +#[path = "optional_modules"] +mod ferron_optional_modules { + #[cfg(feature = "asgi")] + pub mod asgi; + #[cfg(feature = "cache")] + pub mod cache; + #[cfg(feature = "cgi")] + pub mod cgi; + #[cfg(feature = "example")] + pub mod example; + #[cfg(feature = "fauth")] + pub mod fauth; + #[cfg(feature = "fcgi")] + pub mod fcgi; + #[cfg(feature = "fproxy")] + pub mod fproxy; + #[cfg(feature = "rproxy")] + pub mod rproxy; + #[cfg(feature = "scgi")] + pub mod scgi; + #[cfg(feature = "wsgi")] + pub mod wsgi; + #[cfg(feature = "wsgid")] + pub mod wsgid; +} + +// Standard library imports +use std::sync::Arc; +use std::{error::Error, path::PathBuf}; + +// External crate imports +use clap::Parser; +use ferron_server::start_server; +use ferron_util::load_config::load_config; +use mimalloc::MiMalloc; + +// Set the global allocator to use mimalloc for performance optimization +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; + +// Struct for command-line arguments +/// A fast, memory-safe web 
server written in Rust +#[derive(Parser, Debug)] +#[command(name = "Ferron")] +#[command(version, about, long_about = None)] +struct Args { + /// The path to the server configuration file + #[arg(short, long, default_value_t = String::from("./ferron.yaml"))] + config: String, +} + +// Function to execute before starting the server +#[allow(clippy::type_complexity)] +fn before_starting_server( + args: &Args, + first_start: bool, +) -> Result> { + // Load the configuration + let yaml_config = load_config(PathBuf::from(args.config.clone()))?; + + let mut module_error = None; + let mut module_libs = Vec::new(); + + // Load external modules defined in the configuration file + if let Some(modules) = yaml_config["global"]["loadModules"].as_vec() { + for module_name_yaml in modules.iter() { + if let Some(module_name) = module_name_yaml.as_str() { + module_libs.push(String::from(module_name)); + } + } + } + + let mut external_modules = Vec::new(); + #[allow(unused_mut)] + let mut modules_optional_builtin = Vec::new(); + // Iterate over loaded module libraries and initialize them + for module_name in module_libs.iter() { + match module_name as &str { + #[cfg(feature = "rproxy")] + "rproxy" => { + external_modules.push( + match ferron_optional_modules::rproxy::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "fproxy")] + "fproxy" => { + external_modules.push( + match ferron_optional_modules::fproxy::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "cache")] + "cache" => { + 
external_modules.push( + match ferron_optional_modules::cache::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "cgi")] + "cgi" => { + external_modules.push( + match ferron_optional_modules::cgi::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "scgi")] + "scgi" => { + external_modules.push( + match ferron_optional_modules::scgi::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "fcgi")] + "fcgi" => { + external_modules.push( + match ferron_optional_modules::fcgi::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "fauth")] + "fauth" => { + external_modules.push( + match ferron_optional_modules::fauth::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "example")] + "example" => { + external_modules.push( + match 
ferron_optional_modules::example::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "wsgi")] + "wsgi" => { + external_modules.push( + match ferron_optional_modules::wsgi::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "wsgid")] + "wsgid" => { + external_modules.push( + match ferron_optional_modules::wsgid::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + #[cfg(feature = "asgi")] + "asgi" => { + external_modules.push( + match ferron_optional_modules::asgi::server_module_init(&yaml_config) { + Ok(module) => module, + Err(err) => { + module_error = Some(anyhow::anyhow!( + "Cannot initialize optional built-in module \"{}\": {}", + module_name, + err + )); + break; + } + }, + ); + + modules_optional_builtin.push(module_name.clone()); + } + _ => { + module_error = Some(anyhow::anyhow!( + "The optional built-in module \"{}\" doesn't exist", + module_name + )); + break; + } + } + } + + // Add modules (both built-in and loaded) + let mut modules = Vec::new(); + match ferron_modules::x_forwarded_for::server_module_init() { + Ok(module) => modules.push(module), + Err(err) => { + if module_error.is_none() { + module_error = Some(anyhow::anyhow!("Cannot load a built-in module: {}", err)); + } + } + }; + match 
ferron_modules::redirects::server_module_init() { + Ok(module) => modules.push(module), + Err(err) => { + if module_error.is_none() { + module_error = Some(anyhow::anyhow!("Cannot load a built-in module: {}", err)); + } + } + }; + match ferron_modules::blocklist::server_module_init(&yaml_config) { + Ok(module) => modules.push(module), + Err(err) => { + if module_error.is_none() { + module_error = Some(anyhow::anyhow!("Cannot load a built-in module: {}", err)); + } + } + }; + match ferron_modules::url_rewrite::server_module_init(&yaml_config) { + Ok(module) => modules.push(module), + Err(err) => { + if module_error.is_none() { + module_error = Some(anyhow::anyhow!("Cannot load a built-in module: {}", err)); + } + } + }; + match ferron_modules::non_standard_codes::server_module_init(&yaml_config) { + Ok(module) => modules.push(module), + Err(err) => { + if module_error.is_none() { + module_error = Some(anyhow::anyhow!("Cannot load a built-in module: {}", err)); + } + } + }; + match ferron_modules::redirect_trailing_slashes::server_module_init() { + Ok(module) => modules.push(module), + Err(err) => { + if module_error.is_none() { + module_error = Some(anyhow::anyhow!("Cannot load a built-in module: {}", err)); + } + } + }; + modules.append(&mut external_modules); + match ferron_modules::default_handler_checks::server_module_init() { + Ok(module) => modules.push(module), + Err(err) => { + if module_error.is_none() { + module_error = Some(anyhow::anyhow!("Cannot load a built-in module: {}", err)); + } + } + }; + match ferron_modules::static_file_serving::server_module_init() { + Ok(module) => modules.push(module), + Err(err) => { + if module_error.is_none() { + module_error = Some(anyhow::anyhow!("Cannot load a built-in module: {}", err)); + } + } + }; + + // Start the server with configuration and loaded modules + start_server( + Arc::new(yaml_config), + modules, + module_error, + modules_optional_builtin, + first_start, + ) +} + +// Entry point of the application +fn 
main() { + let args = &Args::parse(); // Parse command-line arguments + let mut first_start = true; + loop { + match before_starting_server(args, first_start) { + Ok(false) => break, + Ok(true) => { + first_start = false; + println!("Reloading the server configuration..."); + } + Err(err) => { + eprintln!("FATAL ERROR: {}", err); + std::process::exit(1); + } + } + } +} diff --git a/ferron/src/modules/blocklist.rs b/ferron/src/modules/blocklist.rs new file mode 100644 index 0000000000000000000000000000000000000000..a20f1580f659aea0b0d864221b47845211c8aefd --- /dev/null +++ b/ferron/src/modules/blocklist.rs @@ -0,0 +1,135 @@ +use std::error::Error; +use std::sync::Arc; + +use crate::ferron_common::{ + ErrorLogger, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use hyper::StatusCode; +use hyper_tungstenite::HyperWebsocket; +use tokio::runtime::Handle; + +use crate::ferron_util::ip_blocklist::IpBlockList; + +struct BlockListModule { + blocklist: Arc, +} + +pub fn server_module_init( + config: &ServerConfig, +) -> Result, Box> { + let blocklist_vec = match config["global"]["blocklist"].as_vec() { + Some(blocklist_vec) => blocklist_vec, + None => &Vec::new(), + }; + + let mut blocklist_str_vec = Vec::new(); + for blocked_yaml in blocklist_vec.iter() { + if let Some(blocked) = blocked_yaml.as_str() { + blocklist_str_vec.push(blocked); + } + } + + let mut blocklist = IpBlockList::new(); + blocklist.load_from_vec(blocklist_str_vec); + + Ok(Box::new(BlockListModule::new(Arc::new(blocklist)))) +} + +impl BlockListModule { + fn new(blocklist: Arc) -> Self { + Self { blocklist } + } +} + +impl ServerModule for BlockListModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(BlockListModuleHandlers { + blocklist: self.blocklist.clone(), + handle, + }) + } +} +struct BlockListModuleHandlers { + blocklist: Arc, + 
handle: Handle, +} + +#[async_trait] +impl ServerModuleHandlers for BlockListModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + if self.blocklist.is_blocked(socket_data.remote_addr.ip()) { + return Ok( + ResponseData::builder(request) + .status(StatusCode::FORBIDDEN) + .build(), + ); + } + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/modules/default_handler_checks.rs b/ferron/src/modules/default_handler_checks.rs new file mode 100644 index 0000000000000000000000000000000000000000..da91dfef0ad4c44880a5d8b01d2dcf21d3017170 --- /dev/null +++ b/ferron/src/modules/default_handler_checks.rs @@ -0,0 +1,134 @@ +use std::error::Error; + +use 
crate::ferron_common::{ + ErrorLogger, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use http_body_util::{BodyExt, Empty}; +use hyper::header::HeaderValue; +use hyper::{header, HeaderMap, Method, Response, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use tokio::runtime::Handle; + +struct DefaultHandlerChecksModule; + +pub fn server_module_init( +) -> Result, Box> { + Ok(Box::new(DefaultHandlerChecksModule::new())) +} + +impl DefaultHandlerChecksModule { + fn new() -> Self { + Self + } +} + +impl ServerModule for DefaultHandlerChecksModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(DefaultHandlerChecksModuleHandlers { handle }) + } +} +struct DefaultHandlerChecksModuleHandlers { + handle: Handle, +} + +#[async_trait] +impl ServerModuleHandlers for DefaultHandlerChecksModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + match request.get_hyper_request().method() { + &Method::OPTIONS => Ok( + ResponseData::builder(request) + .response( + Response::builder() + .status(StatusCode::NO_CONTENT) + .header(header::ALLOW, "GET, POST, HEAD, OPTIONS") + .body(Empty::new().map_err(|e| match e {}).boxed()) + .unwrap_or_default(), + ) + .build(), + ), + &Method::GET | &Method::POST | &Method::HEAD => Ok(ResponseData::builder(request).build()), + _ => { + let mut header_map = HeaderMap::new(); + if let Ok(header_value) = HeaderValue::from_str("GET, POST, HEAD, OPTIONS") { + header_map.insert(header::ALLOW, header_value); + }; + Ok( + ResponseData::builder(request) + .status(StatusCode::METHOD_NOT_ALLOWED) + .headers(header_map) + .build(), + ) + } + } + }) + .await + } + + async fn proxy_request_handler( + &mut self, + 
request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok( + ResponseData::builder(request) + .status(StatusCode::NOT_IMPLEMENTED) + .build(), + ) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/modules/non_standard_codes.rs b/ferron/src/modules/non_standard_codes.rs new file mode 100644 index 0000000000000000000000000000000000000000..680987935aaa6ba9d0488e7d0ea90bfbbef4107b --- /dev/null +++ b/ferron/src/modules/non_standard_codes.rs @@ -0,0 +1,537 @@ +use std::error::Error; +use std::sync::Arc; +use std::time::Duration; + +use crate::ferron_util::ip_blocklist::IpBlockList; +use crate::ferron_util::ip_match::ip_match; +use crate::ferron_util::match_hostname::match_hostname; +use crate::ferron_util::match_location::match_location; +use crate::ferron_util::non_standard_code_structs::{ + NonStandardCode, NonStandardCodesLocationWrap, NonStandardCodesWrap, +}; +use crate::ferron_util::ttl_cache::TtlCache; + +use crate::ferron_common::{ + ErrorLogger, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + 
ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use base64::{engine::general_purpose, Engine}; +use fancy_regex::RegexBuilder; +use http_body_util::{BodyExt, Empty}; +use hyper::header::HeaderValue; +use hyper::{header, HeaderMap, Response, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use password_auth::verify_password; +use tokio::runtime::Handle; +use tokio::sync::RwLock; +use yaml_rust2::Yaml; + +fn non_standard_codes_config_init( + non_standard_codes_list: &[Yaml], +) -> Result, anyhow::Error> { + let non_standard_codes_list_iter = non_standard_codes_list.iter(); + let mut non_standard_codes_list_vec = Vec::new(); + for non_standard_codes_list_entry in non_standard_codes_list_iter { + let status_code: u16 = match non_standard_codes_list_entry["scode"].as_i64() { + Some(scode) => scode.try_into()?, + None => { + return Err(anyhow::anyhow!( + "Non-standard codes must include a status code" + )); + } + }; + let regex = match non_standard_codes_list_entry["regex"].as_str() { + Some(regex_str) => match RegexBuilder::new(regex_str) + .case_insensitive(cfg!(windows)) + .build() + { + Ok(regex) => Some(regex), + Err(err) => { + return Err(anyhow::anyhow!( + "Invalid non-standard code regular expression: {}", + err.to_string() + )); + } + }, + None => None, + }; + let url = non_standard_codes_list_entry["url"] + .as_str() + .map(|s| s.to_string()); + + if regex.is_none() && url.is_none() { + return Err(anyhow::anyhow!( + "Non-standard codes must either include URL or a matching regular expression" + )); + } + + let location = non_standard_codes_list_entry["location"] + .as_str() + .map(|s| s.to_string()); + let realm = non_standard_codes_list_entry["realm"] + .as_str() + .map(|s| s.to_string()); + let disable_brute_force_protection = non_standard_codes_list_entry["disableBruteProtection"] + .as_bool() + .unwrap_or(false); + let user_list = match 
non_standard_codes_list_entry["userList"].as_vec() { + Some(userlist) => { + let mut new_userlist = Vec::new(); + for user_yaml in userlist.iter() { + if let Some(user) = user_yaml.as_str() { + new_userlist.push(user.to_string()); + } + } + Some(new_userlist) + } + None => None, + }; + let users = match non_standard_codes_list_entry["users"].as_vec() { + Some(users_vec) => { + let mut users_str_vec = Vec::new(); + for user_yaml in users_vec.iter() { + if let Some(user) = user_yaml.as_str() { + users_str_vec.push(user); + } + } + + let mut users_init = IpBlockList::new(); + users_init.load_from_vec(users_str_vec); + Some(users_init) + } + None => None, + }; + non_standard_codes_list_vec.push(NonStandardCode::new( + status_code, + url, + regex, + location, + realm, + disable_brute_force_protection, + user_list, + users, + )); + } + + Ok(non_standard_codes_list_vec) +} + +pub fn server_module_init( + config: &ServerConfig, +) -> Result, Box> { + let mut global_non_standard_codes_list = Vec::new(); + let mut host_non_standard_codes_lists = Vec::new(); + if let Some(non_standard_codes_list_yaml) = config["global"]["nonStandardCodes"].as_vec() { + global_non_standard_codes_list = non_standard_codes_config_init(non_standard_codes_list_yaml)?; + } + + if let Some(hosts) = config["hosts"].as_vec() { + for host_yaml in hosts.iter() { + let domain = host_yaml["domain"].as_str().map(String::from); + let ip = host_yaml["ip"].as_str().map(String::from); + let mut locations = Vec::new(); + if let Some(locations_yaml) = host_yaml["locations"].as_vec() { + for location_yaml in locations_yaml.iter() { + if let Some(path_str) = location_yaml["path"].as_str() { + let path = String::from(path_str); + if let Some(non_standard_codes_list_yaml) = location_yaml["nonStandardCodes"].as_vec() { + locations.push(NonStandardCodesLocationWrap::new( + path, + non_standard_codes_config_init(non_standard_codes_list_yaml)?, + )); + } + } + } + } + if let Some(non_standard_codes_list_yaml) = 
host_yaml["nonStandardCodes"].as_vec() { + host_non_standard_codes_lists.push(NonStandardCodesWrap::new( + domain, + ip, + non_standard_codes_config_init(non_standard_codes_list_yaml)?, + locations, + )); + } else if !locations.is_empty() { + host_non_standard_codes_lists.push(NonStandardCodesWrap::new( + domain, + ip, + Vec::new(), + locations, + )); + } + } + } + + Ok(Box::new(NonStandardCodesModule::new( + Arc::new(global_non_standard_codes_list), + Arc::new(host_non_standard_codes_lists), + Arc::new(RwLock::new(TtlCache::new(Duration::new(300, 0)))), + ))) +} + +struct NonStandardCodesModule { + global_non_standard_codes_list: Arc>, + host_non_standard_codes_lists: Arc>, + brute_force_db: Arc>>, +} + +impl NonStandardCodesModule { + fn new( + global_non_standard_codes_list: Arc>, + host_non_standard_codes_lists: Arc>, + brute_force_db: Arc>>, + ) -> Self { + Self { + global_non_standard_codes_list, + host_non_standard_codes_lists, + brute_force_db, + } + } +} + +impl ServerModule for NonStandardCodesModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(NonStandardCodesModuleHandlers { + global_non_standard_codes_list: self.global_non_standard_codes_list.clone(), + host_non_standard_codes_lists: self.host_non_standard_codes_lists.clone(), + brute_force_db: self.brute_force_db.clone(), + handle, + }) + } +} + +fn parse_basic_auth(auth_str: &str) -> Option<(String, String)> { + if let Some(base64_credentials) = auth_str.strip_prefix("Basic ") { + if let Ok(decoded) = general_purpose::STANDARD.decode(base64_credentials) { + if let Ok(decoded_str) = std::str::from_utf8(&decoded) { + let parts: Vec<&str> = decoded_str.splitn(2, ':').collect(); + if parts.len() == 2 { + return Some((parts[0].to_string(), parts[1].to_string())); + } + } + } + } + None +} + +struct NonStandardCodesModuleHandlers { + global_non_standard_codes_list: Arc>, + host_non_standard_codes_lists: Arc>, + brute_force_db: Arc>>, + handle: Handle, +} + +#[async_trait] +impl 
ServerModuleHandlers for NonStandardCodesModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let hyper_request = request.get_hyper_request(); + let global_non_standard_codes_list = self.global_non_standard_codes_list.iter(); + let empty_vector = Vec::new(); + let another_empty_vector = Vec::new(); + let mut host_non_standard_codes_list = empty_vector.iter(); + let mut location_non_standard_codes_list = another_empty_vector.iter(); + + // Should have used a HashMap instead of iterating over an array for better performance... + for host_non_standard_codes_list_wrap in self.host_non_standard_codes_lists.iter() { + if match_hostname( + match &host_non_standard_codes_list_wrap.domain { + Some(value) => Some(value as &str), + None => None, + }, + match hyper_request.headers().get(header::HOST) { + Some(value) => value.to_str().ok(), + None => None, + }, + ) && match &host_non_standard_codes_list_wrap.ip { + Some(value) => ip_match(value as &str, socket_data.remote_addr.ip()), + None => true, + } { + host_non_standard_codes_list = + host_non_standard_codes_list_wrap.non_standard_codes.iter(); + if let Ok(path_decoded) = urlencoding::decode( + request + .get_original_url() + .unwrap_or(request.get_hyper_request().uri()) + .path(), + ) { + for location_wrap in host_non_standard_codes_list_wrap.locations.iter() { + if match_location(&location_wrap.path, &path_decoded) { + location_non_standard_codes_list = location_wrap.non_standard_codes.iter(); + break; + } + } + } + break; + } + } + + let combined_non_standard_codes_list = global_non_standard_codes_list + .chain(host_non_standard_codes_list) + .chain(location_non_standard_codes_list); + + let request_url = format!( + "{}{}", + hyper_request.uri().path(), + match hyper_request.uri().query() { + Some(query) => format!("?{}", query), + None => 
String::from(""), + } + ); + + let mut auth_user = None; + + for non_standard_code in combined_non_standard_codes_list { + let mut redirect_url = None; + let mut url_matched = false; + + if let Some(users) = &non_standard_code.users { + if !users.is_blocked(socket_data.remote_addr.ip()) { + // Don't process this non-standard code + continue; + } + } + + if let Some(regex) = &non_standard_code.regex { + let regex_match_option = regex.find(&request_url)?; + if let Some(regex_match) = regex_match_option { + url_matched = true; + if non_standard_code.status_code == 301 + || non_standard_code.status_code == 302 + || non_standard_code.status_code == 307 + || non_standard_code.status_code == 308 + { + let matched_text = regex_match.as_str(); + if let Some(location) = &non_standard_code.location { + redirect_url = Some(regex.replace(matched_text, location).to_string()); + } + } + } + } + + if !url_matched { + if let Some(url) = &non_standard_code.url { + if url == hyper_request.uri().path() { + url_matched = true; + if non_standard_code.status_code == 301 + || non_standard_code.status_code == 302 + || non_standard_code.status_code == 307 + || non_standard_code.status_code == 308 + { + if let Some(location) = &non_standard_code.location { + redirect_url = Some(format!( + "{}{}", + location, + match hyper_request.uri().query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + )); + } + } + } + } + } + + if url_matched { + match non_standard_code.status_code { + 301 | 302 | 307 | 308 => { + return Ok( + ResponseData::builder(request) + .response( + Response::builder() + .status(StatusCode::from_u16(non_standard_code.status_code)?) 
+ .header(header::LOCATION, redirect_url.unwrap_or(request_url)) + .body(Empty::new().map_err(|e| match e {}).boxed())?, + ) + .build(), + ); + } + 401 => { + let brute_force_db_key = socket_data.remote_addr.ip().to_string(); + if !non_standard_code.disable_brute_force_protection { + let rwlock_read = self.brute_force_db.read().await; + let current_attempts = rwlock_read.get(&brute_force_db_key).unwrap_or(0); + if current_attempts >= 10 { + error_logger + .log(&format!( + "Too many failed authorization attempts for client \"{}\"", + socket_data.remote_addr.ip() + )) + .await; + + return Ok( + ResponseData::builder(request) + .status(StatusCode::TOO_MANY_REQUESTS) + .build(), + ); + } + } + let mut header_map = HeaderMap::new(); + header_map.insert( + header::WWW_AUTHENTICATE, + HeaderValue::from_str(&format!( + "Basic realm=\"{}\", charset=\"UTF-8\"", + non_standard_code + .realm + .clone() + .unwrap_or("Ferron HTTP Basic Authorization".to_string()) + .replace("\\", "\\\\") + .replace("\"", "\\\"") + ))?, + ); + + if let Some(authorization_header_value) = + hyper_request.headers().get(header::AUTHORIZATION) + { + let authorization_str = match authorization_header_value.to_str() { + Ok(str) => str, + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + if let Some((username, password)) = parse_basic_auth(authorization_str) { + if let Some(users_vec_yaml) = config["users"].as_vec() { + let mut authorized_user = None; + for user_yaml in users_vec_yaml { + if let Some(username_db) = user_yaml["name"].as_str() { + if username_db != username { + continue; + } + if let Some(user_list) = &non_standard_code.user_list { + if !user_list.contains(&username) { + continue; + } + } + if let Some(password_hash_db) = user_yaml["pass"].as_str() { + let password_cloned = password.clone(); + let password_hash_db_cloned = password_hash_db.to_string(); + // Offload verifying the hash into a separate blocking thread. 
+ let password_valid = tokio::task::spawn_blocking(move || { + verify_password(password_cloned, &password_hash_db_cloned).is_ok() + }) + .await?; + if password_valid { + authorized_user = Some(&username); + break; + } + } + } + } + if let Some(authorized_user) = authorized_user { + auth_user = Some(authorized_user.to_owned()); + continue; + } + } + + if !non_standard_code.disable_brute_force_protection { + let mut rwlock_write = self.brute_force_db.write().await; + rwlock_write.cleanup(); + let current_attempts = rwlock_write.get(&brute_force_db_key).unwrap_or(0); + rwlock_write.insert(brute_force_db_key, current_attempts + 1); + } + + error_logger + .log(&format!( + "Authorization failed for user \"{}\" and client \"{}\"", + username, + socket_data.remote_addr.ip() + )) + .await; + } + } + + return Ok( + ResponseData::builder(request) + .status(StatusCode::UNAUTHORIZED) + .headers(header_map) + .build(), + ); + } + _ => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::from_u16(non_standard_code.status_code)?) 
+ .build(), + ) + } + } + } + } + + if auth_user.is_some() { + let (hyper_request, _, original_url) = request.into_parts(); + Ok(ResponseData::builder(RequestData::new(hyper_request, auth_user, original_url)).build()) + } else { + Ok(ResponseData::builder(request).build()) + } + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/modules/redirect_trailing_slashes.rs b/ferron/src/modules/redirect_trailing_slashes.rs new file mode 100644 index 0000000000000000000000000000000000000000..e670765e5f1dc059a58b359aff4fb874489205f9 --- /dev/null +++ b/ferron/src/modules/redirect_trailing_slashes.rs @@ -0,0 +1,235 @@ +use std::error::Error; +use std::path::Path; +use std::sync::Arc; +use std::time::Duration; + +use crate::ferron_common::{ + ErrorLogger, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use 
crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use http_body_util::{BodyExt, Empty}; +use hyper::{header, Response, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use tokio::fs; +use tokio::runtime::Handle; +use tokio::sync::RwLock; + +use crate::ferron_util::ttl_cache::TtlCache; + +pub fn server_module_init( +) -> Result, Box> { + let cache = Arc::new(RwLock::new(TtlCache::new(Duration::from_millis(100)))); + Ok(Box::new(RedirectTrailingSlashesModule::new(cache))) +} + +struct RedirectTrailingSlashesModule { + cache: Arc>>, +} + +impl RedirectTrailingSlashesModule { + fn new(cache: Arc>>) -> Self { + Self { cache } + } +} + +impl ServerModule for RedirectTrailingSlashesModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(RedirectTrailingSlashesModuleHandlers { + cache: self.cache.clone(), + handle, + }) + } +} + +struct RedirectTrailingSlashesModuleHandlers { + cache: Arc>>, + handle: Handle, +} + +#[async_trait] +impl ServerModuleHandlers for RedirectTrailingSlashesModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + if config["disableTrailingSlashRedirects"].as_bool() != Some(true) { + if let Some(wwwroot) = config["wwwroot"].as_str() { + let hyper_request = request.get_hyper_request(); + + let request_path = hyper_request.uri().path(); + let request_query = hyper_request.uri().query(); + let mut request_path_bytes = request_path.bytes(); + if request_path_bytes.len() < 1 || request_path_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + match request_path_bytes.last() { + Some(b'/') | None => { + return Ok(ResponseData::builder(request).build()); + } + _ => { + let cache_key = format!( + "{}{}{}", + match config["ip"].as_str() { + 
Some(ip) => format!("{}-", ip), + None => String::from(""), + }, + match config["domain"].as_str() { + Some(domain) => format!("{}-", domain), + None => String::from(""), + }, + request_path + ); + + let read_rwlock = self.cache.read().await; + if let Some(is_directory) = read_rwlock.get(&cache_key) { + drop(read_rwlock); + if is_directory { + let new_request_uri = format!( + "{}/{}", + request_path, + match request_query { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ); + return Ok( + ResponseData::builder(request) + .response( + Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header(header::LOCATION, new_request_uri) + .body(Empty::new().map_err(|e| match e {}).boxed())?, + ) + .build(), + ); + } + } else { + drop(read_rwlock); + + let path = Path::new(wwwroot); + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let joined_pathbuf = path.join(decoded_relative_path); + + match fs::metadata(joined_pathbuf).await { + Ok(metadata) => { + let is_directory = metadata.is_dir(); + let mut write_rwlock = self.cache.write().await; + write_rwlock.cleanup(); + write_rwlock.insert(cache_key, is_directory); + if is_directory { + let new_request_uri = format!( + "{}/{}", + request_path, + match request_query { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ); + return Ok( + ResponseData::builder(request) + .response( + Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header(header::LOCATION, new_request_uri) + .body(Empty::new().map_err(|e| match e {}).boxed())?, + ) + .build(), + ); + } + } + Err(_) => { + let mut write_rwlock = self.cache.write().await; + 
write_rwlock.cleanup(); + write_rwlock.insert(cache_key, false); + } + } + } + } + }; + } + } + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/modules/redirects.rs b/ferron/src/modules/redirects.rs new file mode 100644 index 0000000000000000000000000000000000000000..44b4faf52a851d74fa406d69801a5cc2c7b1b818 --- /dev/null +++ b/ferron/src/modules/redirects.rs @@ -0,0 +1,245 @@ +use std::error::Error; + +use crate::ferron_common::{ + ErrorLogger, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use http_body_util::{BodyExt, Empty}; +use hyper::{header, Response, StatusCode, Uri}; +use hyper_tungstenite::HyperWebsocket; +use tokio::runtime::Handle; + +struct RedirectsModule; + +pub fn 
server_module_init( +) -> Result, Box> { + Ok(Box::new(RedirectsModule::new())) +} + +impl RedirectsModule { + fn new() -> Self { + Self + } +} + +impl ServerModule for RedirectsModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(RedirectsModuleHandlers { handle }) + } +} +struct RedirectsModuleHandlers { + handle: Handle, +} + +#[async_trait] +impl ServerModuleHandlers for RedirectsModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let hyper_request = request.get_hyper_request(); + + if config["secure"].as_bool() == Some(true) + && !socket_data.encrypted + && config["disableNonEncryptedServer"].as_bool() != Some(true) + && config["disableToHTTPSRedirect"].as_bool() != Some(true) + { + let host_header_option = hyper_request.headers().get(header::HOST); + let host_header = match host_header_option { + Some(header_data) => header_data.to_str()?, + None => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ) + } + }; + + let path_and_query_option = hyper_request.uri().path_and_query(); + let path_and_query = match path_and_query_option { + Some(path_and_query) => path_and_query.to_string(), + None => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ) + } + }; + + let mut parts: Vec<&str> = host_header.split(':').collect(); + + if parts.len() > 1 + && !(parts[0].starts_with('[') + && parts + .last() + .map(|part| part.ends_with(']')) + .unwrap_or(false)) + { + parts.pop(); + } + + let host_name = parts.join(":"); + + let new_uri = Uri::builder() + .scheme("https") + .authority(match config["sport"].as_i64() { + None | Some(443) => host_name, + Some(port) => format!("{}:{}", host_name, port), + }) + .path_and_query(path_and_query) + .build()?; + + return Ok( + 
ResponseData::builder(request) + .response( + Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header(header::LOCATION, new_uri.to_string()) + .body(Empty::new().map_err(|e| match e {}).boxed())?, + ) + .build(), + ); + } + + let domain_yaml = &config["domain"]; + let domain = domain_yaml.as_str(); + + if let Some(domain) = domain { + if config["wwwredirect"].as_bool() == Some(true) { + // Even more code rewritten from SVR.JS... + if let Some(host_header_value) = hyper_request.headers().get(header::HOST) { + let host_header = host_header_value.to_str()?; + + let path_and_query_option = hyper_request.uri().path_and_query(); + let path_and_query = match path_and_query_option { + Some(path_and_query) => path_and_query.to_string(), + None => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ) + } + }; + + let mut parts: Vec<&str> = host_header.split(':').collect(); + let mut host_port: Option<&str> = None; + + if parts.len() > 1 + && !(parts[0].starts_with('[') + && parts + .last() + .map(|part| part.ends_with(']')) + .unwrap_or(false)) + { + host_port = parts.pop(); + } + + let host_name = parts.join(":"); + + if host_name == domain && !host_name.starts_with("www.") { + let new_uri = Uri::builder() + .scheme(match socket_data.encrypted { + true => "https", + false => "http", + }) + .authority(match host_port { + Some(port) => format!("www.{}:{}", host_name, port), + None => host_name, + }) + .path_and_query(path_and_query) + .build()?; + + return Ok( + ResponseData::builder(request) + .response( + Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header(header::LOCATION, new_uri.to_string()) + .body(Empty::new().map_err(|e| match e {}).boxed())?, + ) + .build(), + ); + } + } + } + } + + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + _error_logger: 
&ErrorLogger, + ) -> Result> { + if config["secure"].as_bool() == Some(true) + && !socket_data.encrypted + && config["disableNonEncryptedServer"].as_bool() != Some(true) + && config["disableToHTTPSRedirect"].as_bool() != Some(true) + { + return Ok( + ResponseData::builder(request) + .status(StatusCode::NOT_IMPLEMENTED) + .build(), + ); + } + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/modules/static_file_serving.rs b/ferron/src/modules/static_file_serving.rs new file mode 100644 index 0000000000000000000000000000000000000000..635684b469a71c14d73cdf7f33c020e321ffbe1c --- /dev/null +++ b/ferron/src/modules/static_file_serving.rs @@ -0,0 +1,928 @@ +use std::error::Error; +use std::fmt::Write; +use std::io::SeekFrom; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use crate::ferron_common::{ + ErrorLogger, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use 
/// Parses an HTTP `Range` header (only the `bytes=` unit, single range)
/// into an inclusive `(start, end)` byte offset pair.
///
/// `default_end` is the last valid byte offset (resource length - 1).
/// Suffix ranges (`bytes=-N`) are resolved against it, and open-ended
/// ranges (`bytes=N-`) are completed with it. Returns `None` for anything
/// that cannot be parsed as a single byte range; the caller answers with
/// 416 Range Not Satisfiable in that case.
fn parse_range_header(range_str: &str, default_end: u64) -> Option<(u64, u64)> {
  let range_part = range_str.strip_prefix("bytes=")?;
  let parts: Vec<&str> = range_part.split('-').collect();
  if parts.len() != 2 {
    // Multiple ranges (or no '-') are not supported.
    return None;
  }
  match (parts[0].is_empty(), parts[1].is_empty()) {
    // "bytes=-N": the last N bytes of the resource.
    (true, false) => {
      let suffix_length = u64::from_str(parts[1]).ok()?;
      if suffix_length == 0 {
        // A zero-length suffix is unsatisfiable (RFC 9110 §14.1.2).
        return None;
      }
      // BUGFIX: the previous `default_end - suffix_length + 1` underflowed
      // (panicking in debug builds) when the suffix exceeded the resource
      // size; RFC 9110 says such a suffix selects the whole resource, so
      // the start offset is clamped to 0 instead.
      Some((default_end.saturating_sub(suffix_length - 1), default_end))
    }
    // "bytes=N-": from offset N to the end of the resource.
    (false, true) => Some((u64::from_str(parts[0]).ok()?, default_end)),
    // "bytes=N-M": an explicit inclusive range.
    (false, false) => Some((
      u64::from_str(parts[0]).ok()?,
      u64::from_str(parts[1]).ok()?,
    )),
    // "bytes=-": no offsets at all.
    (true, true) => None,
  }
}
/// Strips surrounding double quotes from an ETag header value and returns
/// the portion before the first hyphen as an owned string, dropping any
/// content-encoding suffix such as "-gzip" or "-br" that this server
/// appends when serving compressed bodies.
fn extract_etag_inner(input: &str) -> Option<String> {
  let unquoted = input.trim_matches('"');
  // `split` always yields at least one (possibly empty) segment, so this
  // in practice always returns `Some`.
  unquoted.split('-').next().map(|etag| etag.to_string())
}
joined_pathbuf_cached = joined_pathbuf_option.is_some(); + let mut joined_pathbuf = match joined_pathbuf_option { + Some(joined_pathbuf) => joined_pathbuf, + None => { + let path = Path::new(wwwroot); + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + path.join(decoded_relative_path) + } + }; + + match fs::metadata(&joined_pathbuf).await { + Ok(mut metadata) => { + if !joined_pathbuf_cached { + if metadata.is_dir() { + let indexes = vec!["index.html", "index.htm", "index.xhtml"]; + for index in indexes { + let temp_joined_pathbuf = joined_pathbuf.join(index); + match fs::metadata(&temp_joined_pathbuf).await { + Ok(temp_metadata) => { + if temp_metadata.is_file() { + metadata = temp_metadata; + joined_pathbuf = temp_joined_pathbuf; + break; + } + } + Err(err) => match err.kind() { + tokio::io::ErrorKind::NotFound | tokio::io::ErrorKind::NotADirectory => { + continue; + } + tokio::io::ErrorKind::PermissionDenied => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::FORBIDDEN) + .build(), + ); + } + _ => Err(err)?, + }, + }; + } + } + let mut rwlock_write = self.pathbuf_cache.write().await; + rwlock_write.cleanup(); + rwlock_write.insert(cache_key, joined_pathbuf.clone()); + drop(rwlock_write); + } + + if metadata.is_file() { + // Check if compression is possible at all + let mut compression_possible = false; + + if config["enableCompression"].as_bool() != Some(false) { + // A hard-coded list of non-compressible file extension + let non_compressible_file_extensions = vec![ + "7z", + "air", + "amlx", + "apk", + "apng", + "appinstaller", + "appx", + "appxbundle", + "arj", + "au", + "avif", + "bdoc", + "boz", + "br", + 
"bz", + "bz2", + "caf", + "class", + "doc", + "docx", + "dot", + "dvi", + "ear", + "epub", + "flv", + "gdoc", + "gif", + "gsheet", + "gslides", + "gz", + "iges", + "igs", + "jar", + "jnlp", + "jp2", + "jpe", + "jpeg", + "jpf", + "jpg", + "jpg2", + "jpgm", + "jpm", + "jpx", + "kmz", + "latex", + "m1v", + "m2a", + "m2v", + "m3a", + "m4a", + "mesh", + "mk3d", + "mks", + "mkv", + "mov", + "mp2", + "mp2a", + "mp3", + "mp4", + "mp4a", + "mp4v", + "mpe", + "mpeg", + "mpg", + "mpg4", + "mpga", + "msg", + "msh", + "msix", + "msixbundle", + "odg", + "odp", + "ods", + "odt", + "oga", + "ogg", + "ogv", + "ogx", + "opus", + "p12", + "pdf", + "pfx", + "pgp", + "pkpass", + "png", + "pot", + "pps", + "ppt", + "pptx", + "qt", + "ser", + "silo", + "sit", + "snd", + "spx", + "stpxz", + "stpz", + "swf", + "tif", + "tiff", + "ubj", + "usdz", + "vbox-extpack", + "vrml", + "war", + "wav", + "weba", + "webm", + "wmv", + "wrl", + "x3dbz", + "x3dvz", + "xla", + "xlc", + "xlm", + "xls", + "xlsx", + "xlt", + "xlw", + "xpi", + "xps", + "zip", + "zst", + ]; + let file_extension = joined_pathbuf + .extension() + .map_or_else(|| "".to_string(), |ext| ext.to_string_lossy().to_string()); + let file_extension_compressible = + !non_compressible_file_extensions.contains(&(&file_extension as &str)); + + if metadata.len() > 256 && file_extension_compressible { + compression_possible = true; + } + } + + let vary; + + // Handle ETags + let mut etag_option = None; + if config["enableETag"].as_bool() != Some(false) { + let etag_cache_key = format!( + "{}-{}-{}", + joined_pathbuf.to_string_lossy(), + metadata.len(), + match metadata.modified() { + Ok(mtime) => { + let datetime: DateTime = mtime.into(); + datetime.format("%Y-%m-%d %H:%M:%S").to_string() + } + Err(_) => String::from(""), + } + ); + let rwlock_read = self.etag_cache.read().await; + // Had to use "peek", since "get" would mutate the LRU cache + let etag_locked_option = rwlock_read.peek(&etag_cache_key).cloned(); + drop(rwlock_read); + let etag = 
match etag_locked_option { + Some(etag) => etag, + None => { + let etag_cache_key_clone = etag_cache_key.clone(); + let etag = tokio::task::spawn_blocking(move || { + let mut hasher = Sha256::new(); + hasher.update(etag_cache_key_clone); + hasher + .finalize() + .iter() + .fold(String::new(), |mut output, b| { + let _ = write!(output, "{b:02x}"); + output + }) + }) + .await?; + + let mut rwlock_write = self.etag_cache.write().await; + rwlock_write.insert(etag_cache_key, etag.clone()); + drop(rwlock_write); + + etag + } + }; + + vary = if compression_possible { + "Accept-Encoding, If-Match, If-None-Match, Range" + } else { + "If-Match, If-None-Match, Range" + }; + + if let Some(if_none_match_value) = + hyper_request.headers().get(header::IF_NONE_MATCH) + { + match if_none_match_value.to_str() { + Ok(if_none_match) => { + if let Some(etag_extracted) = extract_etag_inner(if_none_match) { + if etag_extracted == etag { + let etag_original = if_none_match.to_string(); + return Ok( + ResponseData::builder(request) + .response( + Response::builder() + .status(StatusCode::NOT_MODIFIED) + .header(header::ETAG, etag_original) + .header(header::VARY, vary) + .body(Empty::new().map_err(|e| match e {}).boxed())?, + ) + .build(), + ); + } + } + } + Err(_) => { + let mut header_map = HeaderMap::new(); + if let Ok(vary) = HeaderValue::from_str(vary) { + header_map.insert(header::VARY, vary); + } + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .headers(header_map) + .build(), + ); + } + } + } + + if let Some(if_match_value) = hyper_request.headers().get(header::IF_MATCH) { + match if_match_value.to_str() { + Ok(if_match) => { + if if_match != "*" { + if let Some(etag_extracted) = extract_etag_inner(if_match) { + if etag_extracted != etag { + let mut header_map = HeaderMap::new(); + header_map.insert(header::ETAG, if_match_value.clone()); + if let Ok(vary) = HeaderValue::from_str(vary) { + header_map.insert(header::VARY, vary); + } + return Ok( + 
ResponseData::builder(request) + .status(StatusCode::PRECONDITION_FAILED) + .headers(header_map) + .build(), + ); + } + } + } + } + Err(_) => { + let mut header_map = HeaderMap::new(); + if let Ok(vary) = HeaderValue::from_str(vary) { + header_map.insert(header::VARY, vary); + } + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .headers(header_map) + .build(), + ); + } + } + } + etag_option = Some(etag); + } else { + vary = if compression_possible { + "Accept-Encoding, Range" + } else { + "Range" + }; + } + + let content_type_option = new_mime_guess::from_path(&joined_pathbuf) + .first() + .map(|mime_type| mime_type.to_string()); + + let range_header = match hyper_request.headers().get(header::RANGE) { + Some(value) => match value.to_str() { + Ok(value) => Some(value), + Err(_) => { + let mut header_map = HeaderMap::new(); + if let Ok(vary) = HeaderValue::from_str(vary) { + header_map.insert(header::VARY, vary); + } + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .headers(header_map) + .build(), + ); + } + }, + None => None, + }; + + if let Some(range_header) = range_header { + let file_length = metadata.len(); + if file_length == 0 { + let mut header_map = HeaderMap::new(); + if let Ok(vary) = HeaderValue::from_str(vary) { + header_map.insert(header::VARY, vary); + } + return Ok( + ResponseData::builder(request) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .headers(header_map) + .build(), + ); + } + if let Some((range_begin, range_end)) = + parse_range_header(range_header, file_length - 1) + { + if range_end > file_length - 1 + || range_begin > file_length - 1 + || range_begin > range_end + { + let mut header_map = HeaderMap::new(); + if let Ok(vary) = HeaderValue::from_str(vary) { + header_map.insert(header::VARY, vary); + } + return Ok( + ResponseData::builder(request) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .headers(header_map) + .build(), + ); + } + + let request_method = 
hyper_request.method(); + let content_length = range_end - range_begin + 1; + + // Build response + let mut response_builder = Response::builder() + .status(StatusCode::PARTIAL_CONTENT) + .header(header::CONTENT_LENGTH, content_length) + .header( + header::CONTENT_RANGE, + format!("bytes {}-{}/{}", range_begin, range_end, file_length), + ); + + if let Some(etag) = etag_option { + response_builder = response_builder.header(header::ETAG, etag); + } + + if let Some(content_type) = content_type_option { + response_builder = response_builder.header(header::CONTENT_TYPE, content_type); + } + + response_builder = response_builder.header(header::VARY, vary); + + let response = match request_method { + &Method::HEAD => { + response_builder.body(Empty::new().map_err(|e| match e {}).boxed())? + } + _ => { + // Open file for reading + let mut file = match fs::File::open(joined_pathbuf).await { + Ok(file) => file, + Err(err) => match err.kind() { + tokio::io::ErrorKind::NotFound | tokio::io::ErrorKind::NotADirectory => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::NOT_FOUND) + .build(), + ); + } + tokio::io::ErrorKind::PermissionDenied => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::FORBIDDEN) + .build(), + ); + } + _ => Err(err)?, + }, + }; + + // Seek and limit the file reader + file.seek(SeekFrom::Start(range_begin)).await?; + let file_limited = file.take(content_length); + + // Use BufReader for better performance. + let file_bufreader = BufReader::with_capacity(12800, file_limited); + + // Construct a boxed body + let reader_stream = ReaderStream::new(file_bufreader); + let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data)); + let boxed_body = stream_body.boxed(); + + response_builder.body(boxed_body)? 
+ } + }; + + return Ok(ResponseData::builder(request).response(response).build()); + } else { + let mut header_map = HeaderMap::new(); + if let Ok(vary) = HeaderValue::from_str(vary) { + header_map.insert(header::VARY, vary); + } + + return Ok( + ResponseData::builder(request) + .status(StatusCode::RANGE_NOT_SATISFIABLE) + .headers(header_map) + .build(), + ); + } + } else { + let mut use_gzip = false; + let mut use_deflate = false; + let mut use_brotli = false; + let mut use_zstd = false; + + if compression_possible { + let user_agent = match hyper_request.headers().get(header::USER_AGENT) { + Some(user_agent_value) => user_agent_value.to_str().unwrap_or_default(), + None => "", + }; + + // Some web browsers have broken HTTP compression handling + let is_netscape_4_broken_html_compression = user_agent.starts_with("Mozilla/4."); + let is_netscape_4_broken_compression = match user_agent.strip_prefix("Mozilla/4.") + { + Some(stripped_user_agent) => matches!( + stripped_user_agent.chars().nth(0), + Some('6') | Some('7') | Some('8') + ), + None => false, + }; + let is_w3m_broken_html_compression = user_agent.starts_with("w3m/"); + if !(content_type_option == Some("text/html".to_string()) + && (is_netscape_4_broken_html_compression || is_w3m_broken_html_compression)) + && !is_netscape_4_broken_compression + { + let accept_encoding = match hyper_request.headers().get(header::ACCEPT_ENCODING) + { + Some(header_value) => header_value.to_str().unwrap_or_default(), + None => "", + }; + + // Checking the Accept-Encoding header naively... 
+ if accept_encoding.contains("br") { + use_brotli = true; + } else if accept_encoding.contains("zstd") { + use_zstd = true; + } else if accept_encoding.contains("deflate") { + use_deflate = true; + } else if accept_encoding.contains("gzip") { + use_gzip = true; + } + } + } + + let request_method = hyper_request.method(); + let content_length = metadata.len(); + + // Build response + let mut response_builder = Response::builder() + .status(StatusCode::OK) + .header(header::ACCEPT_RANGES, "bytes"); + + if let Some(etag) = etag_option { + if use_brotli { + response_builder = + response_builder.header(header::ETAG, format!("\"{}-br\"", etag)); + } else if use_zstd { + response_builder = + response_builder.header(header::ETAG, format!("\"{}-zstd\"", etag)); + } else if use_deflate { + response_builder = + response_builder.header(header::ETAG, format!("\"{}-deflate\"", etag)); + } else if use_gzip { + response_builder = + response_builder.header(header::ETAG, format!("\"{}-gzip\"", etag)); + } else { + response_builder = + response_builder.header(header::ETAG, format!("\"{}\"", etag)); + } + } + + response_builder = response_builder.header(header::VARY, vary); + + if let Some(content_type) = content_type_option { + response_builder = response_builder.header(header::CONTENT_TYPE, content_type); + } + + if use_brotli { + response_builder = response_builder.header(header::CONTENT_ENCODING, "br"); + } else if use_zstd { + response_builder = response_builder.header(header::CONTENT_ENCODING, "zstd"); + } else if use_deflate { + response_builder = response_builder.header(header::CONTENT_ENCODING, "deflate"); + } else if use_gzip { + response_builder = response_builder.header(header::CONTENT_ENCODING, "gzip"); + } else { + // Content-Length header + HTTP compression = broken HTTP responses! 
+ response_builder = + response_builder.header(header::CONTENT_LENGTH, content_length); + } + + let response = match request_method { + &Method::HEAD => { + response_builder.body(Empty::new().map_err(|e| match e {}).boxed())? + } + _ => { + // Open file for reading + let file = match fs::File::open(joined_pathbuf).await { + Ok(file) => file, + Err(err) => match err.kind() { + tokio::io::ErrorKind::NotFound | tokio::io::ErrorKind::NotADirectory => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::NOT_FOUND) + .build(), + ); + } + tokio::io::ErrorKind::PermissionDenied => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::FORBIDDEN) + .build(), + ); + } + _ => Err(err)?, + }, + }; + + // Use BufReader for better performance. + let file_bufreader = BufReader::with_capacity(12800, file); + + // Construct a boxed body + let boxed_body = if use_brotli { + // Brotli compression quality of 4 + let reader_stream = ReaderStream::new(BrotliEncoder::with_quality( + file_bufreader, + Level::Precise(4), + )); + let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data)); + stream_body.boxed() + } else if use_zstd { + // Limit the Zstandard window size to 128K (2^17 bytes) to support many HTTP clients + let reader_stream = ReaderStream::new(ZstdEncoder::with_quality_and_params( + file_bufreader, + Level::Default, + &[CParameter::window_log(17)], + )); + let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data)); + stream_body.boxed() + } else if use_deflate { + let reader_stream = ReaderStream::new(DeflateEncoder::new(file_bufreader)); + let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data)); + stream_body.boxed() + } else if use_gzip { + let reader_stream = ReaderStream::new(GzipEncoder::new(file_bufreader)); + let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data)); + stream_body.boxed() + } else { + let reader_stream = ReaderStream::new(file_bufreader); + let stream_body = 
StreamBody::new(reader_stream.map_ok(Frame::data)); + stream_body.boxed() + }; + + response_builder.body(boxed_body)? + } + }; + + return Ok(ResponseData::builder(request).response(response).build()); + } + } else if metadata.is_dir() { + if config["enableDirectoryListing"].as_bool() == Some(true) { + let joined_maindesc_pathbuf = joined_pathbuf.join(".maindesc"); + let directory = match fs::read_dir(joined_pathbuf).await { + Ok(directory) => directory, + Err(err) => match err.kind() { + tokio::io::ErrorKind::NotFound => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::NOT_FOUND) + .build(), + ); + } + tokio::io::ErrorKind::PermissionDenied => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::FORBIDDEN) + .build(), + ); + } + _ => Err(err)?, + }, + }; + + let description = (fs::read_to_string(joined_maindesc_pathbuf).await).ok(); + + let directory_listing_html = + generate_directory_listing(directory, original_request_path, description).await?; + let content_length: Option = directory_listing_html.len().try_into().ok(); + + let mut response_builder = Response::builder().status(StatusCode::OK); + + if let Some(content_length) = content_length { + response_builder = response_builder.header(header::CONTENT_LENGTH, content_length) + } + response_builder = response_builder.header(header::CONTENT_TYPE, "text/html"); + + let response = response_builder.body( + Full::new(Bytes::from(directory_listing_html)) + .map_err(|e| match e {}) + .boxed(), + )?; + + return Ok(ResponseData::builder(request).response(response).build()); + } else { + return Ok( + ResponseData::builder(request) + .status(StatusCode::FORBIDDEN) + .build(), + ); + } + } else { + return Ok( + ResponseData::builder(request) + .status(StatusCode::NOT_IMPLEMENTED) + .build(), + ); + } + } + Err(err) => match err.kind() { + tokio::io::ErrorKind::NotFound | tokio::io::ErrorKind::NotADirectory => { + return Ok( + ResponseData::builder(request) + 
.status(StatusCode::NOT_FOUND) + .build(), + ); + } + tokio::io::ErrorKind::PermissionDenied => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::FORBIDDEN) + .build(), + ); + } + _ => Err(err)?, + }, + } + } + + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/modules/url_rewrite.rs b/ferron/src/modules/url_rewrite.rs new file mode 100644 index 0000000000000000000000000000000000000000..c00eba8266e7650f5e726a8a30ba087f81042d31 --- /dev/null +++ b/ferron/src/modules/url_rewrite.rs @@ -0,0 +1,350 @@ +use std::error::Error; +use std::path::Path; +use std::sync::Arc; + +use crate::ferron_util::ip_match::ip_match; +use crate::ferron_util::match_hostname::match_hostname; +use crate::ferron_util::match_location::match_location; +use crate::ferron_util::url_rewrite_structs::{ + UrlRewriteMapEntry, 
UrlRewriteMapLocationWrap, UrlRewriteMapWrap, +}; + +use crate::ferron_common::{ + ErrorLogger, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use fancy_regex::RegexBuilder; +use hyper::{header, Request, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use tokio::fs; +use tokio::runtime::Handle; +use yaml_rust2::Yaml; + +fn url_rewrite_config_init(rewrite_map: &[Yaml]) -> Result, anyhow::Error> { + let rewrite_map_iter = rewrite_map.iter(); + let mut rewrite_map_vec = Vec::new(); + for rewrite_map_entry in rewrite_map_iter { + let regex_str = match rewrite_map_entry["regex"].as_str() { + Some(regex_str) => regex_str, + None => return Err(anyhow::anyhow!("Invalid URL rewrite regular expression")), + }; + let regex = match RegexBuilder::new(regex_str) + .case_insensitive(cfg!(windows)) + .build() + { + Ok(regex) => regex, + Err(err) => { + return Err(anyhow::anyhow!( + "Invalid URL rewrite regular expression: {}", + err.to_string() + )) + } + }; + let replacement = match rewrite_map_entry["replacement"].as_str() { + Some(replacement) => String::from(replacement), + None => return Err(anyhow::anyhow!("URL rewrite rules must have replacements")), + }; + let is_not_file = rewrite_map_entry["isNotFile"].as_bool().unwrap_or(false); + let is_not_directory = rewrite_map_entry["isNotDirectory"] + .as_bool() + .unwrap_or(false); + let last = rewrite_map_entry["last"].as_bool().unwrap_or_default(); + let allow_double_slashes = rewrite_map_entry["allowDoubleSlashes"] + .as_bool() + .unwrap_or(false); + rewrite_map_vec.push(UrlRewriteMapEntry::new( + regex, + replacement, + is_not_directory, + is_not_file, + last, + allow_double_slashes, + )); + } + + Ok(rewrite_map_vec) +} + +pub fn server_module_init( + config: &ServerConfig, +) -> Result, Box> { + let mut global_url_rewrite_map = Vec::new(); + let mut 
host_url_rewrite_maps = Vec::new(); + if let Some(rewrite_map_yaml) = config["global"]["rewriteMap"].as_vec() { + global_url_rewrite_map = url_rewrite_config_init(rewrite_map_yaml)?; + } + + if let Some(hosts) = config["hosts"].as_vec() { + for host_yaml in hosts.iter() { + let domain = host_yaml["domain"].as_str().map(String::from); + let ip = host_yaml["ip"].as_str().map(String::from); + let mut locations = Vec::new(); + if let Some(locations_yaml) = host_yaml["locations"].as_vec() { + for location_yaml in locations_yaml.iter() { + if let Some(path_str) = location_yaml["path"].as_str() { + let path = String::from(path_str); + if let Some(rewrite_map_yaml) = location_yaml["rewriteMap"].as_vec() { + locations.push(UrlRewriteMapLocationWrap::new( + path, + url_rewrite_config_init(rewrite_map_yaml)?, + )); + } + } + } + } + if let Some(rewrite_map_yaml) = host_yaml["rewriteMap"].as_vec() { + host_url_rewrite_maps.push(UrlRewriteMapWrap::new( + domain, + ip, + url_rewrite_config_init(rewrite_map_yaml)?, + locations, + )); + } else if !locations.is_empty() { + host_url_rewrite_maps.push(UrlRewriteMapWrap::new(domain, ip, Vec::new(), locations)); + } + } + } + + Ok(Box::new(UrlRewriteModule::new( + Arc::new(global_url_rewrite_map), + Arc::new(host_url_rewrite_maps), + ))) +} + +struct UrlRewriteModule { + global_url_rewrite_map: Arc>, + host_url_rewrite_maps: Arc>, +} + +impl UrlRewriteModule { + fn new( + global_url_rewrite_map: Arc>, + host_url_rewrite_maps: Arc>, + ) -> Self { + Self { + global_url_rewrite_map, + host_url_rewrite_maps, + } + } +} + +impl ServerModule for UrlRewriteModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(UrlRewriteModuleHandlers { + global_url_rewrite_map: self.global_url_rewrite_map.clone(), + host_url_rewrite_maps: self.host_url_rewrite_maps.clone(), + handle, + }) + } +} +struct UrlRewriteModuleHandlers { + global_url_rewrite_map: Arc>, + host_url_rewrite_maps: Arc>, + handle: Handle, +} + +#[async_trait] +impl 
ServerModuleHandlers for UrlRewriteModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let hyper_request = request.get_hyper_request(); + let global_url_rewrite_map = self.global_url_rewrite_map.iter(); + let empty_vector = Vec::new(); + let another_empty_vector = Vec::new(); + let mut host_url_rewrite_map = empty_vector.iter(); + let mut location_url_rewrite_map = another_empty_vector.iter(); + + // Should have used a HashMap instead of iterating over an array for better performance... + for host_url_rewrite_map_wrap in self.host_url_rewrite_maps.iter() { + if match_hostname( + match &host_url_rewrite_map_wrap.domain { + Some(value) => Some(value as &str), + None => None, + }, + match hyper_request.headers().get(header::HOST) { + Some(value) => value.to_str().ok(), + None => None, + }, + ) && match &host_url_rewrite_map_wrap.ip { + Some(value) => ip_match(value as &str, socket_data.remote_addr.ip()), + None => true, + } { + host_url_rewrite_map = host_url_rewrite_map_wrap.rewrite_map.iter(); + if let Ok(path_decoded) = urlencoding::decode( + request + .get_original_url() + .unwrap_or(request.get_hyper_request().uri()) + .path(), + ) { + for location_wrap in host_url_rewrite_map_wrap.locations.iter() { + if match_location(&location_wrap.path, &path_decoded) { + location_url_rewrite_map = location_wrap.rewrite_map.iter(); + break; + } + } + } + break; + } + } + + let combined_url_rewrite_map = global_url_rewrite_map + .chain(host_url_rewrite_map) + .chain(location_url_rewrite_map); + + let original_url = format!( + "{}{}", + hyper_request.uri().path(), + match hyper_request.uri().query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ); + let mut rewritten_url = original_url.clone(); + + let mut rewritten_url_bytes = rewritten_url.bytes(); + if 
rewritten_url_bytes.len() < 1 || rewritten_url_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + for url_rewrite_map_entry in combined_url_rewrite_map { + // Check if it's a file or a directory according to the rewrite map configuration + if url_rewrite_map_entry.is_not_directory || url_rewrite_map_entry.is_not_file { + if let Some(wwwroot) = config["wwwroot"].as_str() { + let path = Path::new(wwwroot); + let mut relative_path = &rewritten_url[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + let relative_path_split: Vec<&str> = relative_path.split("?").collect(); + if !relative_path_split.is_empty() { + relative_path = relative_path_split[0]; + } + let joined_pathbuf = path.join(relative_path); + if let Ok(metadata) = fs::metadata(joined_pathbuf).await { + if (url_rewrite_map_entry.is_not_file && metadata.is_file()) + || (url_rewrite_map_entry.is_not_directory && metadata.is_dir()) + { + continue; + } + } + } + } + + if !url_rewrite_map_entry.allow_double_slashes { + while rewritten_url.contains("//") { + rewritten_url = rewritten_url.replace("//", "/"); + } + } + + // Actual URL rewriting + let old_rewritten_url = rewritten_url; + rewritten_url = url_rewrite_map_entry + .regex + .replace(&old_rewritten_url, &url_rewrite_map_entry.replacement) + .to_string(); + + let mut rewritten_url_bytes = rewritten_url.bytes(); + if rewritten_url_bytes.len() < 1 || rewritten_url_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + if url_rewrite_map_entry.last && old_rewritten_url != rewritten_url { + break; + } + } + + if rewritten_url == original_url { + Ok(ResponseData::builder(request).build()) + } else { + if config["enableRewriteLogging"].as_bool() == Some(true) { + error_logger + .log(&format!( + "URL rewritten from \"{}\" to \"{}\"", + 
original_url, rewritten_url + )) + .await; + } + let (hyper_request, auth_user, _) = request.into_parts(); + let (mut parts, body) = hyper_request.into_parts(); + let original_url = parts.uri.clone(); + let mut url_parts = parts.uri.into_parts(); + url_parts.path_and_query = Some(rewritten_url.parse()?); + parts.uri = hyper::Uri::from_parts(url_parts)?; + let hyper_request = Request::from_parts(parts, body); + let request = RequestData::new(hyper_request, auth_user, Some(original_url)); + Ok(ResponseData::builder(request).build()) + } + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/modules/x_forwarded_for.rs b/ferron/src/modules/x_forwarded_for.rs new file mode 100644 index 0000000000000000000000000000000000000000..ff0bc70715661937b9cdae86cb4ca403adca3e8d --- /dev/null +++ b/ferron/src/modules/x_forwarded_for.rs @@ -0,0 +1,144 @@ +use std::error::Error; +use 
std::net::{IpAddr, SocketAddr}; + +use crate::ferron_common::{ + ErrorLogger, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use hyper::StatusCode; +use hyper_tungstenite::HyperWebsocket; +use tokio::runtime::Handle; + +struct XForwardedForModule; + +pub fn server_module_init( +) -> Result, Box> { + Ok(Box::new(XForwardedForModule::new())) +} + +impl XForwardedForModule { + fn new() -> Self { + Self + } +} + +impl ServerModule for XForwardedForModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(XForwardedForModuleHandlers { handle }) + } +} +struct XForwardedForModuleHandlers { + handle: Handle, +} + +#[async_trait] +impl ServerModuleHandlers for XForwardedForModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + if config["enableIPSpoofing"].as_bool() == Some(true) { + let hyper_request = request.get_hyper_request(); + + if let Some(x_forwarded_for_value) = hyper_request.headers().get("x-forwarded-for") { + let x_forwarded_for = x_forwarded_for_value.to_str()?; + + let prepared_remote_ip_str = match x_forwarded_for.split(",").nth(0) { + Some(ip_address_str) => ip_address_str.replace(" ", ""), + None => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let prepared_remote_ip: IpAddr = match prepared_remote_ip_str.parse() { + Ok(ip_address) => ip_address, + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let new_socket_addr = SocketAddr::new(prepared_remote_ip, socket_data.remote_addr.port()); + + return Ok( + ResponseData::builder(request) + .new_remote_address(new_socket_addr) + 
.build(), + ); + } + + return Ok(ResponseData::builder(request).build()); + } + + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/optional_modules/asgi.rs b/ferron/src/optional_modules/asgi.rs new file mode 100644 index 0000000000000000000000000000000000000000..6ac2c6895cb443a7df02db44481670186b02b510 --- /dev/null +++ b/ferron/src/optional_modules/asgi.rs @@ -0,0 +1,1476 @@ +// WARNING: We have measured this module on our computers, and found it to be slower than Uvicorn (with 1 worker), +// with FastAPI application, vanilla ASGI application is found out to be faster than Uvicorn (with 1 worker). +// It might be more performant to just use Ferron as a reverse proxy for Uvicorn (or any other ASGI server). 
+ +use std::error::Error; +use std::ffi::CString; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::thread; + +use crate::ferron_common::{ + ErrorLogger, HyperUpgraded, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperResponse, WithRuntime}; +use crate::ferron_util::asgi_messages::{ + asgi_event_to_outgoing_struct, incoming_struct_to_asgi_event, AsgiHttpBody, AsgiHttpInitData, + AsgiInitData, AsgiWebsocketClose, AsgiWebsocketInitData, AsgiWebsocketMessage, + IncomingAsgiMessage, IncomingAsgiMessageInner, OutgoingAsgiMessage, OutgoingAsgiMessageInner, +}; +use crate::ferron_util::asgi_structs::{AsgiApplicationLocationWrap, AsgiApplicationWrap}; +use crate::ferron_util::ip_match::ip_match; +use crate::ferron_util::match_hostname::match_hostname; +use crate::ferron_util::match_location::match_location; +use async_channel::{Receiver, Sender}; +use async_trait::async_trait; +use futures_util::{SinkExt, StreamExt}; +use http::{HeaderMap, HeaderName, HeaderValue, Response, Version}; +use http_body_util::{BodyExt, StreamBody}; +use hyper::body::{Bytes, Frame}; +use hyper::{header, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use pyo3::exceptions::{PyIOError, PyOSError, PyRuntimeError, PyTypeError}; +use pyo3::prelude::*; +use pyo3::types::{PyCFunction, PyDict, PyList, PyTuple, PyType}; +use tokio::fs; +use tokio::runtime::{Handle, Runtime}; +use tokio::sync::Mutex; +use tokio_tungstenite::tungstenite::protocol::CloseFrame; +use tokio_tungstenite::tungstenite::Message; +use tokio_util::sync::CancellationToken; + +type AsgiChannelResult = + Result<(Sender, Receiver), anyhow::Error>; +type AsgiEventLoopCommunication = Vec<(Sender<()>, Receiver)>; + +async fn asgi_application_fn( + asgi_application: Arc>, + tx: Sender, + rx: Receiver, +) { + let init_message = match rx.recv().await { + 
Ok(IncomingAsgiMessage::Init(message)) => message, + Err(err) => { + tx.send(OutgoingAsgiMessage::Error(PyErr::new::( + err.to_string(), + ))) + .await + .unwrap_or_default(); + return; + } + _ => { + tx.send(OutgoingAsgiMessage::Error(PyErr::new::( + "Unexpected message received", + ))) + .await + .unwrap_or_default(); + return; + } + }; + let tx_clone = tx.clone(); + let rx_clone = rx.clone(); + match Python::with_gil(move |py| -> PyResult<_> { + let tx_clone = tx_clone.clone(); + let rx_clone = rx_clone.clone(); + + let scope = PyDict::new(py); + let scope_asgi = PyDict::new(py); + + match init_message { + AsgiInitData::Lifespan => { + scope.set_item("type", "lifespan")?; + scope_asgi.set_item("version", "3.0")?; + } + AsgiInitData::Http(http_init_data) => { + let path = http_init_data.hyper_request_parts.uri.path().to_owned(); + let query_string = http_init_data + .hyper_request_parts + .uri + .query() + .unwrap_or("") + .to_owned(); + let original_request_uri = http_init_data + .original_request_uri + .unwrap_or(http_init_data.hyper_request_parts.uri); + scope.set_item("type", "http")?; + scope_asgi.set_item("version", "2.5")?; + scope.set_item( + "http_version", + match http_init_data.hyper_request_parts.version { + Version::HTTP_09 => "1.0", // ASGI doesn't support HTTP/0.9 + Version::HTTP_10 => "1.0", + Version::HTTP_11 => "1.1", + Version::HTTP_2 => "2", + Version::HTTP_3 => "2", // ASGI doesn't support HTTP/3 + _ => "1.1", // Some other HTTP versions, of course... 
+ }, + )?; + scope.set_item( + "method", + http_init_data.hyper_request_parts.method.to_string(), + )?; + scope.set_item( + "scheme", + if http_init_data.socket_data.encrypted { + "https" + } else { + "http" + }, + )?; + scope.set_item("path", urlencoding::decode(&path)?)?; + scope.set_item("raw_path", original_request_uri.to_string().as_bytes())?; + scope.set_item("query_string", query_string.as_bytes())?; + if let Ok(script_path) = http_init_data + .execute_pathbuf + .as_path() + .strip_prefix(http_init_data.wwwroot) + { + scope.set_item( + "root_path", + format!( + "/{}", + match cfg!(windows) { + true => script_path.to_string_lossy().to_string().replace("\\", "/"), + false => script_path.to_string_lossy().to_string(), + } + ), + )?; + } + let headers = PyList::empty(py); + for (header_name, header_value) in http_init_data.hyper_request_parts.headers.iter() { + let header_name = header_name.as_str().as_bytes(); + let header_value = header_value.as_bytes(); + if !header_name.is_empty() && header_name[0] != b':' { + headers.append(PyTuple::new(py, [header_name, header_value].into_iter())?)?; + } + } + scope.set_item("headers", headers)?; + scope.set_item( + "client", + ( + http_init_data + .socket_data + .remote_addr + .ip() + .to_canonical() + .to_string(), + http_init_data.socket_data.remote_addr.port(), + ), + )?; + scope.set_item( + "server", + ( + http_init_data + .socket_data + .local_addr + .ip() + .to_canonical() + .to_string(), + http_init_data.socket_data.local_addr.port(), + ), + )?; + } + AsgiInitData::Websocket(websocket_init_data) => { + let path = websocket_init_data.uri.path().to_owned(); + let query_string = websocket_init_data.uri.query().unwrap_or("").to_owned(); + let original_request_uri = websocket_init_data.uri; + scope.set_item("type", "websocket")?; + scope_asgi.set_item("version", "2.5")?; + scope.set_item( + "http_version", + "1.1", // WebSocket is supported only on HTTP/1.1 in Ferron + )?; + scope.set_item( + "scheme", + if 
websocket_init_data.socket_data.encrypted { + "wss" + } else { + "ws" + }, + )?; + scope.set_item("path", urlencoding::decode(&path)?)?; + scope.set_item("raw_path", original_request_uri.to_string().as_bytes())?; + scope.set_item("query_string", query_string.as_bytes())?; + if let Ok(script_path) = websocket_init_data + .execute_pathbuf + .as_path() + .strip_prefix(websocket_init_data.wwwroot) + { + scope.set_item( + "root_path", + format!( + "/{}", + match cfg!(windows) { + true => script_path.to_string_lossy().to_string().replace("\\", "/"), + false => script_path.to_string_lossy().to_string(), + } + ), + )?; + } + // Ferron doesn't send original request headers (before WebSocket upgrade) to WebSocket request handlers + scope.set_item("headers", PyList::empty(py))?; + scope.set_item( + "client", + ( + websocket_init_data + .socket_data + .remote_addr + .ip() + .to_canonical() + .to_string(), + websocket_init_data.socket_data.remote_addr.port(), + ), + )?; + scope.set_item( + "server", + ( + websocket_init_data + .socket_data + .local_addr + .ip() + .to_canonical() + .to_string(), + websocket_init_data.socket_data.local_addr.port(), + ), + )?; + scope.set_item("subprotocols", PyList::empty(py))?; + } + }; + + scope_asgi.set_item("spec_version", "1.0")?; + scope.set_item("asgi", scope_asgi)?; + let scope_extensions = PyDict::new(py); + scope_extensions.set_item("http.response.trailers", PyDict::new(py))?; + scope.set_item("extensions", scope_extensions)?; + + let client_disconnected = Arc::new(AtomicBool::new(false)); + let client_disconnected_clone = client_disconnected.clone(); + + let receive = PyCFunction::new_closure( + py, + None, + None, + move |args: &Bound<'_, PyTuple>, _: Option<&Bound<'_, PyDict>>| -> PyResult<_> { + let rx = rx_clone.clone(); + let client_disconnected = client_disconnected.clone(); + Ok( + pyo3_async_runtimes::tokio::future_into_py(args.py(), async move { + if client_disconnected.load(Ordering::Relaxed) { + Err(PyErr::new::("Client 
disconnected")) + } else { + let message = rx + .recv() + .await + .map_err(|e| PyErr::new::(e.to_string()))?; + match message { + IncomingAsgiMessage::Init(_) => Err(PyErr::new::( + "Unexpected ASGI initialization message", + )), + IncomingAsgiMessage::Message(message) => { + if let IncomingAsgiMessageInner::HttpDisconnect = &message { + client_disconnected.store(true, Ordering::Relaxed); + } + incoming_struct_to_asgi_event(message) + } + } + } + })? + .unbind(), + ) + }, + )?; + let send = PyCFunction::new_closure( + py, + None, + None, + move |args: &Bound<'_, PyTuple>, _: Option<&Bound<'_, PyDict>>| -> PyResult<_> { + let event = args.get_item(0)?.downcast::()?.clone(); + let message = asgi_event_to_outgoing_struct(event)?; + let tx = tx_clone.clone(); + let client_disconnected = client_disconnected_clone.clone(); + Ok( + pyo3_async_runtimes::tokio::future_into_py(args.py(), async move { + if client_disconnected.load(Ordering::Relaxed) { + Err(PyErr::new::("Client disconnected")) + } else { + tx.send(OutgoingAsgiMessage::Message(message)) + .await + .map_err(|e| PyErr::new::(e.to_string()))?; + Ok(()) + } + })? + .unbind(), + ) + }, + )?; + + let asgi_coroutine = + match asgi_application.call(py, (scope.clone(), receive.clone(), send.clone()), None) { + Ok(coroutine) => coroutine, + Err(err) => { + if !err.get_type(py).is(&PyType::new::(py)) { + return Err(err); + } else { + asgi_application + .call(py, (scope,), None)? + .call(py, (receive, send), None)? 
+ } + } + }; + + pyo3_async_runtimes::tokio::into_future(asgi_coroutine.into_bound(py)) + }) { + Err(err) => tx + .send(OutgoingAsgiMessage::Error(PyErr::new::( + err.to_string(), + ))) + .await + .unwrap_or_default(), + Ok(asgi_future) => match asgi_future.await { + Err(err) => tx + .send(OutgoingAsgiMessage::Error(err)) + .await + .unwrap_or_default(), + Ok(_) => tx + .send(OutgoingAsgiMessage::Finished) + .await + .unwrap_or_default(), + }, + } +} + +async fn asgi_lifetime_init_fn(asgi_applications: Vec>>) -> Vec { + let mut results = Vec::new(); + for asgi_application in asgi_applications { + results.push( + async { + let (tx, rx_task) = async_channel::unbounded::(); + let (tx_task, rx) = async_channel::unbounded::(); + if let Ok(locals) = Python::with_gil(pyo3_async_runtimes::tokio::get_current_locals) { + tokio::spawn(pyo3_async_runtimes::tokio::scope( + locals, + asgi_application_fn(asgi_application, tx_task, rx_task), + )); + tx.send(IncomingAsgiMessage::Init(AsgiInitData::Lifespan)) + .await + .map_err(|e| anyhow::anyhow!(e.to_string()))?; + Ok((tx, rx)) + } else { + Err(anyhow::anyhow!("Cannot obtain task locals")) + } + } + .await, + ); + } + results +} + +async fn asgi_event_loop_fn( + asgi_application: Arc>, + tx: Sender, + rx: Receiver<()>, +) { + loop { + if rx.recv().await.is_err() { + continue; + } + + let (tx_send, rx_task) = async_channel::unbounded::(); + let (tx_task, rx_send) = async_channel::unbounded::(); + let asgi_application_cloned = asgi_application.clone(); + if let Ok(locals) = Python::with_gil(pyo3_async_runtimes::tokio::get_current_locals) { + tokio::spawn(pyo3_async_runtimes::tokio::scope( + locals, + asgi_application_fn(asgi_application_cloned, tx_task, rx_task), + )); + tx.send(Ok((tx_send, rx_send))).await.unwrap_or_default(); + } + } +} + +async fn asgi_init_event_loop_fn( + cancel_token: CancellationToken, + asgi_applications: Vec>>, + mut channels: Vec<(Sender, Receiver<()>)>, +) { + Python::with_gil(|py| { + // Try installing 
`uvloop`, when it fails, use `asyncio` fallback instead. + if let Ok(uvloop) = py.import("uvloop") { + let _ = uvloop.call_method0("install"); + } + + pyo3_async_runtimes::tokio::run::<_, ()>(py, async move { + let asgi_lifetime_channels = asgi_lifetime_init_fn(asgi_applications.clone()).await; + for asgi_lifetime_channel_result in &asgi_lifetime_channels { + if let Ok((tx, rx)) = asgi_lifetime_channel_result.as_ref() { + tx.send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::LifespanStartup, + )) + .await + .unwrap_or_default(); + loop { + match rx.recv().await { + Ok(OutgoingAsgiMessage::Message( + OutgoingAsgiMessageInner::LifespanStartupComplete, + )) + | Ok(OutgoingAsgiMessage::Message( + OutgoingAsgiMessageInner::LifespanStartupFailed(_), + )) + | Ok(OutgoingAsgiMessage::Finished) + | Ok(OutgoingAsgiMessage::Error(_)) + | Err(_) => break, + _ => (), + } + } + } + } + let init_closure = async move { + let mut channels_len = channels.len(); + if let Some((tx_last, rx_last)) = channels.pop() { + channels_len -= 1; + let last_channel_id = channels_len; + for (tx, rx) in channels { + channels_len -= 1; + if let Ok(locals) = Python::with_gil(pyo3_async_runtimes::tokio::get_current_locals) { + tokio::spawn(pyo3_async_runtimes::tokio::scope( + locals, + asgi_event_loop_fn(asgi_applications[channels_len].clone(), tx, rx), + )); + } + } + + if let Ok(locals) = Python::with_gil(pyo3_async_runtimes::tokio::get_current_locals) { + tokio::spawn(pyo3_async_runtimes::tokio::scope( + locals, + asgi_event_loop_fn(asgi_applications[last_channel_id].clone(), tx_last, rx_last), + )) + .await + .unwrap_or_default(); + } + } + }; + tokio::select! 
{ + _ = cancel_token.cancelled() => {} + _ = init_closure => {} + } + for asgi_lifetime_channel_result in &asgi_lifetime_channels { + if let Ok((tx, rx)) = asgi_lifetime_channel_result.as_ref() { + tx.send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::LifespanShutdown, + )) + .await + .unwrap_or_default(); + loop { + match rx.recv().await { + Ok(OutgoingAsgiMessage::Message( + OutgoingAsgiMessageInner::LifespanShutdownComplete, + )) + | Ok(OutgoingAsgiMessage::Message( + OutgoingAsgiMessageInner::LifespanShutdownFailed(_), + )) + | Ok(OutgoingAsgiMessage::Finished) + | Ok(OutgoingAsgiMessage::Error(_)) + | Err(_) => break, + _ => (), + } + } + } + } + Ok(()) + }) + }) + .unwrap_or_default(); +} + +pub fn load_asgi_application( + file_path: &Path, + clear_sys_path: bool, +) -> Result, Box> { + let script_dirname = file_path + .parent() + .map(|path| path.to_string_lossy().to_string()); + let script_name = file_path.to_string_lossy().to_string(); + let script_name_cstring = CString::from_str(&script_name)?; + let module_name = script_name + .strip_suffix(".py") + .unwrap_or(&script_name) + .to_lowercase() + .chars() + .map(|c| if c.is_lowercase() { '_' } else { c }) + .collect::(); + let module_name_cstring = CString::from_str(&module_name)?; + let script_data = std::fs::read_to_string(file_path)?; + let script_data_cstring = CString::from_str(&script_data)?; + let asgi_application = Python::with_gil(move |py| -> PyResult> { + let mut sys_path_old = None; + if let Some(script_dirname) = script_dirname { + if let Ok(sys_module) = PyModule::import(py, "sys") { + if let Ok(sys_path_any) = sys_module.getattr("path") { + if let Ok(sys_path) = sys_path_any.downcast::() { + let sys_path = sys_path.clone(); + sys_path_old = sys_path.extract::>().ok(); + sys_path.insert(0, script_dirname).unwrap_or_default(); + } + } + } + } + let asgi_application = PyModule::from_code( + py, + &script_data_cstring, + &script_name_cstring, + &module_name_cstring, + )? 
+ .getattr("application")? + .unbind(); + if clear_sys_path { + if let Some(sys_path) = sys_path_old { + if let Ok(sys_module) = PyModule::import(py, "sys") { + sys_module.setattr("path", sys_path).unwrap_or_default(); + } + } + } + Ok(asgi_application) + })?; + Ok(asgi_application) +} + +pub fn server_module_init( + config: &ServerConfig, +) -> Result, Box> { + let mut asgi_applications = Vec::new(); + let mut global_asgi_application_id = None; + let mut host_asgi_application_ids = Vec::new(); + let clear_sys_path = config["global"]["asgiClearModuleImportPath"] + .as_bool() + .unwrap_or(false); + if let Some(asgi_application_path) = config["global"]["asgiApplicationPath"].as_str() { + let asgi_application_id = asgi_applications.len(); + asgi_applications.push(Arc::new(load_asgi_application( + PathBuf::from_str(asgi_application_path)?.as_path(), + clear_sys_path, + )?)); + global_asgi_application_id = Some(asgi_application_id); + } + let global_asgi_path = config["global"]["asgiPath"].as_str().map(|s| s.to_string()); + + if let Some(hosts) = config["hosts"].as_vec() { + for host_yaml in hosts.iter() { + let domain = host_yaml["domain"].as_str().map(String::from); + let ip = host_yaml["ip"].as_str().map(String::from); + let mut locations = Vec::new(); + if let Some(locations_yaml) = host_yaml["locations"].as_vec() { + for location_yaml in locations_yaml.iter() { + if let Some(path_str) = location_yaml["path"].as_str() { + let path = String::from(path_str); + if let Some(asgi_application_path) = location_yaml["asgiApplicationPath"].as_str() { + let asgi_application_id = asgi_applications.len(); + asgi_applications.push(Arc::new(load_asgi_application( + PathBuf::from_str(asgi_application_path)?.as_path(), + clear_sys_path, + )?)); + + locations.push(AsgiApplicationLocationWrap::new( + path, + asgi_application_id, + asgi_application_path.to_string(), + location_yaml["asgiPath"].as_str().map(|s| s.to_string()), + )); + } + } + } + } + if let Some(asgi_application_path) 
= host_yaml["asgiApplicationPath"].as_str() { + let asgi_application_id = asgi_applications.len(); + asgi_applications.push(Arc::new(load_asgi_application( + PathBuf::from_str(asgi_application_path)?.as_path(), + clear_sys_path, + )?)); + host_asgi_application_ids.push(AsgiApplicationWrap::new( + domain, + ip, + Some(asgi_application_id), + Some(asgi_application_path.to_string()), + host_yaml["asgiPath"].as_str().map(|s| s.to_string()), + locations, + )); + } else if !locations.is_empty() { + host_asgi_application_ids.push(AsgiApplicationWrap::new( + domain, + ip, + None, + None, + host_yaml["asgiPath"].as_str().map(|s| s.to_string()), + locations, + )); + } + } + } + + let cancel_token: CancellationToken = CancellationToken::new(); + let cancel_token_thread = cancel_token.clone(); + let mut asgi_event_loop_communication = Vec::new(); + let mut asgi_event_loop_communication_thread = Vec::new(); + + for _ in 0..asgi_applications.len() { + let (tx, rx_thread) = async_channel::unbounded::<()>(); + let (tx_thread, rx) = async_channel::unbounded::(); + asgi_event_loop_communication.push((tx, rx)); + asgi_event_loop_communication_thread.push((tx_thread, rx_thread)); + } + + let available_parallelism = thread::available_parallelism()?.get(); + + // Initialize a single-threaded (due to Python's GIL) Tokio runtime to be used as an intermediary event loop for asynchronous Python + let mut runtime_builder = tokio::runtime::Builder::new_multi_thread(); + runtime_builder + .worker_threads(1) + .enable_all() + .thread_name("python-async-pool"); + pyo3_async_runtimes::tokio::init(runtime_builder); + + // Create and spawn a task in the Tokio runtime for ASGI + let runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(match available_parallelism / 2 { + 0 => 1, + non_zero => non_zero, + }) + .enable_all() + .thread_name("asgi-pool") + .build()?; + + runtime.spawn(asgi_init_event_loop_fn( + cancel_token_thread, + asgi_applications, + 
asgi_event_loop_communication_thread, + )); + + Ok(Box::new(AsgiModule::new( + global_asgi_application_id, + global_asgi_path, + Arc::new(host_asgi_application_ids), + cancel_token, + asgi_event_loop_communication, + runtime, + ))) +} + +struct AsgiModule { + global_asgi_application_id: Option, + global_asgi_path: Option, + host_asgi_application_ids: Arc>, + cancel_token: CancellationToken, + asgi_event_loop_communication: AsgiEventLoopCommunication, + #[allow(dead_code)] + runtime: Runtime, +} + +impl AsgiModule { + fn new( + global_asgi_application_id: Option, + global_asgi_path: Option, + host_asgi_application_ids: Arc>, + cancel_token: CancellationToken, + asgi_event_loop_communication: AsgiEventLoopCommunication, + runtime: Runtime, + ) -> Self { + AsgiModule { + global_asgi_application_id, + global_asgi_path, + host_asgi_application_ids, + cancel_token, + asgi_event_loop_communication, + runtime, + } + } +} + +impl ServerModule for AsgiModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(AsgiModuleHandlers { + global_asgi_application_id: self.global_asgi_application_id, + global_asgi_path: self.global_asgi_path.clone(), + host_asgi_application_ids: self.host_asgi_application_ids.clone(), + asgi_event_loop_communication: self.asgi_event_loop_communication.clone(), + handle, + }) + } +} + +impl Drop for AsgiModule { + fn drop(&mut self) { + self.cancel_token.cancel(); + } +} + +struct AsgiModuleHandlers { + global_asgi_application_id: Option, + global_asgi_path: Option, + host_asgi_application_ids: Arc>, + asgi_event_loop_communication: AsgiEventLoopCommunication, + handle: Handle, +} + +#[async_trait] +impl ServerModuleHandlers for AsgiModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let hyper_request = request.get_hyper_request(); + + // Use .take() instead 
of .clone(), since the values in Options will only be used once. + let mut asgi_application_id = self.global_asgi_application_id.take(); + let mut asgi_path = self.global_asgi_path.take(); + + // Should have used a HashMap instead of iterating over an array for better performance... + for host_asgi_application_wrap in self.host_asgi_application_ids.iter() { + if match_hostname( + match &host_asgi_application_wrap.domain { + Some(value) => Some(value as &str), + None => None, + }, + match hyper_request.headers().get(header::HOST) { + Some(value) => value.to_str().ok(), + None => None, + }, + ) && match &host_asgi_application_wrap.ip { + Some(value) => ip_match(value as &str, socket_data.remote_addr.ip()), + None => true, + } { + asgi_application_id = host_asgi_application_wrap.asgi_application_id; + asgi_path = host_asgi_application_wrap.asgi_path.clone(); + if let Ok(path_decoded) = urlencoding::decode( + request + .get_original_url() + .unwrap_or(request.get_hyper_request().uri()) + .path(), + ) { + for location_wrap in host_asgi_application_wrap.locations.iter() { + if match_location(&location_wrap.path, &path_decoded) { + asgi_application_id = Some(location_wrap.asgi_application_id); + asgi_path = location_wrap.asgi_path.clone(); + break; + } + } + } + break; + } + } + + let request_path = hyper_request.uri().path(); + let mut request_path_bytes = request_path.bytes(); + if request_path_bytes.len() < 1 || request_path_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + if let Some(asgi_application_id) = asgi_application_id { + let asgi_path = asgi_path.unwrap_or("/".to_string()); + let mut canonical_asgi_path: &str = &asgi_path; + if canonical_asgi_path.bytes().last() == Some(b'/') { + canonical_asgi_path = &canonical_asgi_path[..(canonical_asgi_path.len() - 1)]; + } + + let request_path_with_slashes = match request_path == canonical_asgi_path { + true => format!("{}/", 
request_path), + false => request_path.to_string(), + }; + if let Some(stripped_request_path) = + request_path_with_slashes.strip_prefix(canonical_asgi_path) + { + let wwwroot_yaml = &config["wwwroot"]; + let wwwroot = wwwroot_yaml.as_str().unwrap_or("/nonexistent"); + + let wwwroot_unknown = PathBuf::from(wwwroot); + let wwwroot_pathbuf = match wwwroot_unknown.as_path().is_absolute() { + true => wwwroot_unknown, + false => match fs::canonicalize(&wwwroot_unknown).await { + Ok(pathbuf) => pathbuf, + Err(_) => wwwroot_unknown, + }, + }; + let wwwroot = wwwroot_pathbuf.as_path(); + + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let joined_pathbuf = wwwroot.join(decoded_relative_path); + let execute_pathbuf = joined_pathbuf; + let execute_path_info = stripped_request_path + .strip_prefix("/") + .map(|s| s.to_string()); + + let (tx, rx) = { + let (tx, rx) = &self.asgi_event_loop_communication[asgi_application_id]; + tx.send(()).await?; + rx.recv().await?? 
+ }; + + return execute_asgi( + request, + socket_data, + error_logger, + wwwroot, + execute_pathbuf, + execute_path_info, + config["serverAdministratorEmail"].as_str(), + tx, + rx, + ) + .await; + } + } + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + websocket: HyperWebsocket, + uri: &hyper::Uri, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result<(), Box> { + WithRuntime::new(self.handle.clone(), async move { + // Use .take() instead of .clone(), since the values in Options will only be used once. + let mut asgi_application_id = self.global_asgi_application_id.take(); + let mut asgi_path = self.global_asgi_path.take(); + + // Should have used a HashMap instead of iterating over an array for better performance... 
+ for host_asgi_application_wrap in self.host_asgi_application_ids.iter() { + // Workaround for Ferron not providing the domain name for WebSocket connections + let config_test_domain = host_asgi_application_wrap + .domain + .as_ref() + .map(|value| value as &str); + let obtained_domain = config["domain"].as_str(); + if config_test_domain == obtained_domain + && config["asgiApplicationPath"].as_str() + == host_asgi_application_wrap.asgi_application_path.as_deref() + && match &host_asgi_application_wrap.ip { + Some(value) => ip_match(value as &str, socket_data.remote_addr.ip()), + None => true, + } + { + asgi_application_id = host_asgi_application_wrap.asgi_application_id; + asgi_path = host_asgi_application_wrap.asgi_path.clone(); + if let Ok(path_decoded) = urlencoding::decode(uri.path()) { + for location_wrap in host_asgi_application_wrap.locations.iter() { + if match_location(&location_wrap.path, &path_decoded) { + asgi_application_id = Some(location_wrap.asgi_application_id); + asgi_path = location_wrap.asgi_path.clone(); + break; + } + } + } + break; + } + } + + let request_path = uri.path(); + let mut request_path_bytes = request_path.bytes(); + if request_path_bytes.len() < 1 || request_path_bytes.nth(0) != Some(b'/') { + return Ok(()); + } + + if let Some(asgi_application_id) = asgi_application_id { + let asgi_path = asgi_path.unwrap_or("/".to_string()); + let mut canonical_asgi_path: &str = &asgi_path; + if canonical_asgi_path.bytes().last() == Some(b'/') { + canonical_asgi_path = &canonical_asgi_path[..(canonical_asgi_path.len() - 1)]; + } + + let request_path_with_slashes = match request_path == canonical_asgi_path { + true => format!("{}/", request_path), + false => request_path.to_string(), + }; + if let Some(stripped_request_path) = + request_path_with_slashes.strip_prefix(canonical_asgi_path) + { + let wwwroot_yaml = &config["wwwroot"]; + let wwwroot = wwwroot_yaml.as_str().unwrap_or("/nonexistent"); + + let wwwroot_unknown = PathBuf::from(wwwroot); 
+ let wwwroot_pathbuf = match wwwroot_unknown.as_path().is_absolute() { + true => wwwroot_unknown, + false => match fs::canonicalize(&wwwroot_unknown).await { + Ok(pathbuf) => pathbuf, + Err(_) => wwwroot_unknown, + }, + }; + let wwwroot = wwwroot_pathbuf.as_path(); + + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok(()); + } + }; + + let joined_pathbuf = wwwroot.join(decoded_relative_path); + let execute_pathbuf = joined_pathbuf; + let execute_path_info = stripped_request_path + .strip_prefix("/") + .map(|s| s.to_string()); + + let (tx, rx) = { + let (tx, rx) = &self.asgi_event_loop_communication[asgi_application_id]; + tx.send(()).await?; + rx.recv().await?? + }; + + return execute_asgi_websocket( + websocket, + uri, + socket_data, + error_logger, + wwwroot, + execute_pathbuf, + execute_path_info, + config["serverAdministratorEmail"].as_str(), + tx, + rx, + ) + .await; + } + } + Ok(()) + }) + .await + } + + fn does_websocket_requests(&mut self, config: &ServerConfig, _socket_data: &SocketData) -> bool { + config["asgiApplicationPath"].as_str().is_some() + } +} + +#[allow(clippy::too_many_arguments)] +async fn execute_asgi( + request: RequestData, + socket_data: &SocketData, + error_logger: &ErrorLogger, + wwwroot: &Path, + execute_pathbuf: PathBuf, + _path_info: Option, + _server_administrator_email: Option<&str>, + asgi_tx: Sender, + asgi_rx: Receiver, +) -> Result> { + let (hyper_request, _, original_request_uri) = request.into_parts(); + let (hyper_request_parts, request_body) = hyper_request.into_parts(); + asgi_tx + .send(IncomingAsgiMessage::Init(AsgiInitData::Http( + AsgiHttpInitData { + hyper_request_parts, + original_request_uri, + socket_data: SocketData { + remote_addr: socket_data.remote_addr, + local_addr: 
socket_data.local_addr, + encrypted: socket_data.encrypted, + }, + error_logger: error_logger.clone(), + wwwroot: wwwroot.to_path_buf(), + execute_pathbuf, + }, + ))) + .await?; + + let mut request_body_stream = request_body.into_data_stream(); + let asgi_tx_clone = asgi_tx.clone(); + + tokio::spawn(async move { + loop { + match request_body_stream.next().await { + Some(Ok(data)) => asgi_tx_clone + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::HttpRequest(AsgiHttpBody { + body: data.to_vec(), + more_body: true, + }), + )) + .await + .unwrap_or_default(), + Some(Err(_)) => { + asgi_tx_clone + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::HttpDisconnect, + )) + .await + .unwrap_or_default(); + } + None => { + asgi_tx_clone + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::HttpRequest(AsgiHttpBody { + body: b"".to_vec(), + more_body: false, + }), + )) + .await + .unwrap_or_default(); + break; + } + } + } + }); + + let asgi_http_response_start; + + loop { + match asgi_rx.recv().await? 
{ + OutgoingAsgiMessage::Finished => Err(anyhow::anyhow!( + "ASGI application returned before sending the HTTP response start event" + ))?, + OutgoingAsgiMessage::Error(err) => Err(err)?, + OutgoingAsgiMessage::Message(OutgoingAsgiMessageInner::HttpResponseStart( + http_response_start, + )) => { + asgi_http_response_start = http_response_start; + break; + } + _ => (), + } + } + + let response_body_stream = futures_util::stream::unfold( + (asgi_tx, asgi_rx, false), + move |(asgi_tx, asgi_rx, request_end)| { + let has_trailers = asgi_http_response_start.trailers; + async move { + if request_end { + asgi_tx + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::HttpDisconnect, + )) + .await + .unwrap_or_default(); + return None; + } + loop { + match asgi_rx.recv().await { + Err(err) => { + return Some(( + Err(std::io::Error::other(err.to_string())), + (asgi_tx, asgi_rx, false), + )) + } + Ok(OutgoingAsgiMessage::Finished) => return None, + Ok(OutgoingAsgiMessage::Error(err)) => { + return Some(( + Err(std::io::Error::other(err.to_string())), + (asgi_tx, asgi_rx, false), + )) + } + Ok(OutgoingAsgiMessage::Message(OutgoingAsgiMessageInner::HttpResponseBody( + http_response_body, + ))) => { + if !http_response_body.more_body { + if http_response_body.body.is_empty() { + if !has_trailers { + asgi_tx + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::HttpDisconnect, + )) + .await + .unwrap_or_default(); + return None; + } + } else { + return Some(( + Ok(Frame::data(Bytes::from(http_response_body.body))), + (asgi_tx, asgi_rx, !has_trailers), + )); + } + } else if !http_response_body.body.is_empty() { + return Some(( + Ok(Frame::data(Bytes::from(http_response_body.body))), + (asgi_tx, asgi_rx, false), + )); + } + } + Ok(OutgoingAsgiMessage::Message(OutgoingAsgiMessageInner::HttpResponseTrailers( + http_response_trailers, + ))) => { + if !http_response_trailers.more_trailers { + if http_response_trailers.headers.is_empty() { + asgi_tx + 
.send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::HttpDisconnect, + )) + .await + .unwrap_or_default(); + return None; + } else { + match async { + let mut headers = HeaderMap::new(); + for (header_name, header_value) in http_response_trailers.headers { + if !header_name.is_empty() && header_name[0] != b':' { + headers.append( + HeaderName::from_bytes(&header_name)?, + HeaderValue::from_bytes(&header_value)?, + ); + } + } + Ok::<_, Box>(headers) + } + .await + { + Ok(headers) => { + return Some((Ok(Frame::trailers(headers)), (asgi_tx, asgi_rx, true))) + } + Err(err) => { + return Some(( + Err(std::io::Error::other(err.to_string())), + (asgi_tx, asgi_rx, false), + )) + } + } + } + } else if !http_response_trailers.headers.is_empty() { + match async { + let mut headers = HeaderMap::new(); + for (header_name, header_value) in http_response_trailers.headers { + if !header_name.is_empty() && header_name[0] != b':' { + headers.append( + HeaderName::from_bytes(&header_name)?, + HeaderValue::from_bytes(&header_value)?, + ); + } + } + Ok::<_, Box>(headers) + } + .await + { + Ok(headers) => { + return Some((Ok(Frame::trailers(headers)), (asgi_tx, asgi_rx, true))) + } + Err(err) => { + return Some(( + Err(std::io::Error::other(err.to_string())), + (asgi_tx, asgi_rx, false), + )) + } + } + } + } + _ => (), + } + } + } + }, + ); + let response_body = BodyExt::boxed(StreamBody::new(response_body_stream)); + + let mut hyper_response = Response::new(response_body); + *hyper_response.status_mut() = StatusCode::from_u16(asgi_http_response_start.status)?; + let headers = hyper_response.headers_mut(); + for (header_name, header_value) in asgi_http_response_start.headers { + if !header_name.is_empty() && header_name[0] != b':' { + headers.append( + HeaderName::from_bytes(&header_name)?, + HeaderValue::from_bytes(&header_value)?, + ); + } + } + + Ok( + ResponseData::builder_without_request() + .response(hyper_response) + .build(), + ) +} + 
+#[allow(clippy::too_many_arguments)] +async fn execute_asgi_websocket( + websocket: HyperWebsocket, + uri: &hyper::Uri, + socket_data: &SocketData, + error_logger: &ErrorLogger, + wwwroot: &Path, + execute_pathbuf: PathBuf, + _path_info: Option, + _server_administrator_email: Option<&str>, + asgi_tx: Sender, + asgi_rx: Receiver, +) -> Result<(), Box> { + asgi_tx + .send(IncomingAsgiMessage::Init(AsgiInitData::Websocket( + AsgiWebsocketInitData { + uri: uri.to_owned(), + socket_data: SocketData { + remote_addr: socket_data.remote_addr, + local_addr: socket_data.local_addr, + encrypted: socket_data.encrypted, + }, + error_logger: error_logger.clone(), + wwwroot: wwwroot.to_path_buf(), + execute_pathbuf, + }, + ))) + .await?; + + asgi_tx + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::WebsocketConnect, + )) + .await?; + + let client_bi_stream; + loop { + match asgi_rx.recv().await? { + OutgoingAsgiMessage::Finished => Err(anyhow::anyhow!( + "ASGI application returned before sending the WebSocket accept event" + ))?, + OutgoingAsgiMessage::Error(err) => Err(err)?, + OutgoingAsgiMessage::Message(OutgoingAsgiMessageInner::WebsocketAccept(_)) => { + client_bi_stream = websocket.await?; + break; + } + OutgoingAsgiMessage::Message(OutgoingAsgiMessageInner::WebsocketClose(_)) => { + asgi_tx + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::WebsocketDisconnect(AsgiWebsocketClose { + code: 1005, + reason: "ASGI application closed the WebSocket connection before accepting it" + .to_string(), + }), + )) + .await + .unwrap_or_default(); + } + _ => (), + } + } + + let (client_sink, mut client_stream) = client_bi_stream.split(); + + let client_disconnected_mutex = Arc::new(Mutex::new(AtomicBool::new(false))); + let client_disconnected_mutex_clone = client_disconnected_mutex.clone(); + + let asgi_tx_clone = asgi_tx.clone(); + let (ping, pong) = async_channel::unbounded(); + + tokio::spawn(async move { + while let Some(websocket_frame) = 
client_stream.next().await { + match websocket_frame { + Err(_) => { + let client_disconnected = client_disconnected_mutex_clone.lock().await; + if !client_disconnected.load(Ordering::Relaxed) { + client_disconnected.store(true, Ordering::Relaxed); + asgi_tx_clone + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::WebsocketDisconnect(AsgiWebsocketClose { + code: 1005, + reason: "Error while receiving WebSocket data".to_string(), + }), + )) + .await + .unwrap_or_default(); + } + } + Ok(Message::Ping(message)) => { + ping.send(message).await.unwrap_or_default(); + } + Ok(Message::Binary(message)) => { + asgi_tx_clone + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::WebsocketReceive(AsgiWebsocketMessage { + bytes: Some(message.to_vec()), + text: None, + }), + )) + .await + .unwrap_or_default(); + } + Ok(Message::Text(message)) => { + asgi_tx_clone + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::WebsocketReceive(AsgiWebsocketMessage { + bytes: None, + text: Some(message.to_string()), + }), + )) + .await + .unwrap_or_default(); + } + Ok(Message::Close(close_frame)) => { + let client_disconnected = client_disconnected_mutex_clone.lock().await; + if !client_disconnected.load(Ordering::Relaxed) { + client_disconnected.store(true, Ordering::Relaxed); + client_disconnected_mutex_clone + .lock() + .await + .store(true, Ordering::Relaxed); + let (status_code, message) = if let Some(close_frame) = close_frame { + (close_frame.code.into(), close_frame.reason.to_string()) + } else { + ( + 1005, + "Websocket connection closed for unknown reason".to_string(), + ) + }; + asgi_tx_clone + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::WebsocketDisconnect(AsgiWebsocketClose { + code: status_code, + reason: message, + }), + )) + .await + .unwrap_or_default(); + } + } + _ => (), + } + } + }); + + let client_sink_mutex = Arc::new(Mutex::new(client_sink)); + let client_sink_mutex_cloned = client_sink_mutex.clone(); + + 
tokio::spawn(async move { + while let Ok(message) = pong.recv().await { + if client_sink_mutex_cloned + .lock() + .await + .send(Message::Pong(message)) + .await + .is_err() + { + break; + } + } + }); + + loop { + match asgi_rx.recv().await? { + OutgoingAsgiMessage::Finished => Err(anyhow::anyhow!( + "ASGI application returned before sending the WebSocket accept event" + ))?, + OutgoingAsgiMessage::Error(err) => Err(err)?, + OutgoingAsgiMessage::Message(OutgoingAsgiMessageInner::WebsocketSend(websocket_message)) => { + let frame_option = if let Some(bytes) = websocket_message.bytes { + Some(Message::binary(bytes)) + } else { + websocket_message.text.map(Message::text) + }; + if let Some(frame) = frame_option { + let mut client_sink = client_sink_mutex.lock().await; + if let Err(err) = client_sink.send(frame).await { + drop(client_sink); + let client_disconnected = client_disconnected_mutex.lock().await; + if !client_disconnected.load(Ordering::Relaxed) { + client_disconnected.store(true, Ordering::Relaxed); + asgi_tx + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::WebsocketDisconnect(AsgiWebsocketClose { + code: 1005, + reason: "Error while sending WebSocket data".to_string(), + }), + )) + .await + .unwrap_or_default(); + } + Err(err)?; + } + } + } + OutgoingAsgiMessage::Message(OutgoingAsgiMessageInner::WebsocketClose(websocket_close)) => { + let client_disconnected = client_disconnected_mutex.lock().await; + if !client_disconnected.load(Ordering::Relaxed) { + client_disconnected.store(true, Ordering::Relaxed); + asgi_tx + .send(IncomingAsgiMessage::Message( + IncomingAsgiMessageInner::WebsocketDisconnect(AsgiWebsocketClose { + code: websocket_close.code, + reason: websocket_close.reason.clone(), + }), + )) + .await + .unwrap_or_default(); + } + let mut client_sink = client_sink_mutex.lock().await; + client_sink + .send(Message::Close(Some(CloseFrame { + code: websocket_close.code.into(), + reason: websocket_close.reason.into(), + }))) + .await?; 
+ client_sink.close().await.unwrap_or_default(); + break; + } + _ => (), + } + } + + Ok(()) +} diff --git a/ferron/src/optional_modules/cache.rs b/ferron/src/optional_modules/cache.rs new file mode 100644 index 0000000000000000000000000000000000000000..b9dac760b7d7c8b98768bd5c8f65819d67adaffe --- /dev/null +++ b/ferron/src/optional_modules/cache.rs @@ -0,0 +1,525 @@ +use std::collections::HashMap; +use std::error::Error; +use std::hash::RandomState; +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use crate::ferron_common::{ + ErrorLogger, HyperUpgraded, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperResponse, WithRuntime}; +use async_trait::async_trait; +use cache_control::{Cachability, CacheControl}; +use futures_util::{StreamExt, TryStreamExt}; +use hashlink::LinkedHashMap; +use http_body_util::{BodyExt, Full, StreamBody}; +use hyper::body::{Bytes, Frame}; +use hyper::header::HeaderValue; +use hyper::{header, HeaderMap, Method, Response, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use itertools::Itertools; +use tokio::runtime::Handle; +use tokio::sync::RwLock; + +const CACHE_HEADER_NAME: &str = "X-Ferron-Cache"; +const DEFAULT_MAX_AGE: u64 = 300; + +pub fn server_module_init( + config: &ServerConfig, +) -> Result, Box> { + let maximum_cache_entries = config["global"]["maximumCacheEntries"] + .as_i64() + .map(|v| v as usize); + + Ok(Box::new(CacheModule::new( + Arc::new(RwLock::new(LinkedHashMap::with_hasher(RandomState::new()))), + Arc::new(RwLock::new(HashMap::new())), + maximum_cache_entries, + ))) +} + +#[allow(clippy::type_complexity)] +struct CacheModule { + cache: Arc< + RwLock< + LinkedHashMap< + String, + ( + StatusCode, + HeaderMap, + Vec, + Instant, + Option, + ), + RandomState, + >, + >, + >, + vary_cache: Arc>>>, + maximum_cache_entries: Option, +} + +impl CacheModule { + #[allow(clippy::type_complexity)] + fn new( + cache: Arc< + RwLock< + 
LinkedHashMap< + String, + ( + StatusCode, + HeaderMap, + Vec, + Instant, + Option, + ), + RandomState, + >, + >, + >, + vary_cache: Arc>>>, + maximum_cache_entries: Option, + ) -> Self { + Self { + cache, + vary_cache, + maximum_cache_entries, + } + } +} + +impl ServerModule for CacheModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(CacheModuleHandlers { + cache: self.cache.clone(), + vary_cache: self.vary_cache.clone(), + maximum_cache_entries: self.maximum_cache_entries, + cache_vary_headers_configured: Vec::new(), + cache_ignore_headers_configured: Vec::new(), + maximum_cached_response_size: None, + cache_key: None, + request_headers: HeaderMap::new(), + has_authorization: false, + cached: false, + no_store: false, + handle, + }) + } +} + +#[allow(clippy::type_complexity)] +struct CacheModuleHandlers { + handle: Handle, + cache: Arc< + RwLock< + LinkedHashMap< + String, + ( + StatusCode, + HeaderMap, + Vec, + Instant, + Option, + ), + RandomState, + >, + >, + >, + vary_cache: Arc>>>, + maximum_cache_entries: Option, + cache_vary_headers_configured: Vec, + cache_ignore_headers_configured: Vec, + maximum_cached_response_size: Option, + cache_key: Option, + request_headers: HeaderMap, + has_authorization: bool, + cached: bool, + no_store: bool, +} + +#[async_trait] +impl ServerModuleHandlers for CacheModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + self.cache_vary_headers_configured = match config["cacheVaryHeaders"].as_vec() { + Some(vector) => { + let mut new_vector = Vec::new(); + for yaml_value in vector.iter() { + if let Some(str_value) = yaml_value.as_str() { + new_vector.push(str_value.to_string()); + } + } + new_vector + } + None => Vec::new(), + }; + self.cache_ignore_headers_configured = match config["cacheIgnoreHeaders"].as_vec() { + Some(vector) 
=> { + let mut new_vector = Vec::new(); + for yaml_value in vector.iter() { + if let Some(str_value) = yaml_value.as_str() { + new_vector.push(str_value.to_string()); + } + } + new_vector + } + None => Vec::new(), + }; + self.maximum_cached_response_size = config["maximumCachedResponseSize"] + .as_i64() + .map(|f| f as u64); + + let hyper_request = request.get_hyper_request(); + let cache_key = format!( + "{} {}{}{}{}", + hyper_request.method().as_str(), + match socket_data.encrypted { + false => "http://", + true => "https://", + }, + match hyper_request.headers().get(header::HOST) { + Some(host) => String::from_utf8_lossy(host.as_bytes()).into_owned(), + None => "".to_string(), + }, + hyper_request.uri().path(), + match hyper_request.uri().query() { + Some(query) => format!("?{}", query), + None => "".to_string(), + } + ); + + let request_cache_control = match hyper_request.headers().get(header::CACHE_CONTROL) { + Some(value) => CacheControl::from_value(&String::from_utf8_lossy(value.as_bytes())), + None => None, + }; + + let mut no_store = false; + let mut no_cache = false; + + if let Some(request_cache_control) = request_cache_control { + no_store = request_cache_control.no_store; + if let Some(cachability) = request_cache_control.cachability { + if cachability == Cachability::NoCache { + no_cache = true; + } + } + } + + match hyper_request.method() { + &Method::GET | &Method::HEAD => (), + _ => { + no_store = true; + } + }; + + if no_store { + self.no_store = true; + return Ok(ResponseData::builder(request).build()); + } + + if !no_cache { + let rwlock_read = self.vary_cache.read().await; + let processed_vary = rwlock_read.get(&cache_key); + if let Some(processed_vary) = processed_vary { + let cache_key_with_vary = format!( + "{}\n{}", + &cache_key, + processed_vary + .iter() + .map(|header_name| { + match hyper_request.headers().get(header_name) { + Some(header_value) => format!( + "{}: {}", + header_name, + 
String::from_utf8_lossy(header_value.as_bytes()).into_owned() + ), + None => "".to_string(), + } + }) + .collect::>() + .join("\n") + ); + + drop(rwlock_read); + + let rwlock_read = self.cache.read().await; + let cached_entry_option = rwlock_read.get(&cache_key_with_vary); + + if let Some((status_code, headers, body, timestamp, response_cache_control)) = + cached_entry_option + { + let max_age = match response_cache_control { + Some(response_cache_control) => match response_cache_control.s_max_age { + Some(s_max_age) => Some(s_max_age), + None => response_cache_control.max_age, + }, + None => None, + }; + + let mut cached = true; + + if timestamp.elapsed() > max_age.unwrap_or(Duration::from_secs(DEFAULT_MAX_AGE)) { + cached = false; + } + + if cached { + self.cached = true; + let mut hyper_response_builder = Response::builder().status(status_code); + for (header_name, header_value) in headers.iter() { + hyper_response_builder = hyper_response_builder.header(header_name, header_value); + } + let hyper_response = hyper_response_builder.body( + Full::new(Bytes::from(body.clone())) + .map_err(|e| match e {}) + .boxed(), + )?; + return Ok( + ResponseData::builder(request) + .response(hyper_response) + .build(), + ); + } else { + drop(rwlock_read); + } + } + } else { + drop(rwlock_read); + } + } + + self.request_headers = hyper_request.headers().clone(); + self.cache_key = Some(cache_key); + self.has_authorization = hyper_request.headers().contains_key(header::AUTHORIZATION); + + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + mut response: HyperResponse, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + if self.no_store { + response + .headers_mut() + 
.insert(CACHE_HEADER_NAME, HeaderValue::from_str("BYPASS")?); + Ok(response) + } else if self.cached { + response + .headers_mut() + .insert(CACHE_HEADER_NAME, HeaderValue::from_str("HIT")?); + Ok(response) + } else if let Some(cache_key) = &self.cache_key { + let (mut response_parts, mut response_body) = response.into_parts(); + let response_cache_control = match response_parts.headers.get(header::CACHE_CONTROL) { + Some(value) => CacheControl::from_value(&String::from_utf8_lossy(value.as_bytes())), + None => None, + }; + + let should_cache_response = match &response_cache_control { + Some(response_cache_control) => { + let is_private = response_cache_control.cachability == Some(Cachability::Private); + let is_public = response_cache_control.cachability == Some(Cachability::Public); + + !response_cache_control.no_store + && !is_private + && (is_public + || (!self.has_authorization + && (response_cache_control.max_age.is_some() + || response_cache_control.s_max_age.is_some()))) + } + None => false, + }; + + if should_cache_response { + let mut response_body_buffer = Vec::new(); + let mut maximum_cached_response_size_exceeded = false; + + while let Some(frame) = response_body.frame().await { + let frame_unwrapped = frame?; + if frame_unwrapped.is_data() { + if let Some(bytes) = frame_unwrapped.data_ref() { + response_body_buffer.extend_from_slice(bytes); + if let Some(maximum_cached_response_size) = self.maximum_cached_response_size { + if response_body_buffer.len() as u64 > maximum_cached_response_size { + maximum_cached_response_size_exceeded = true; + break; + } + } + } + } + } + + if maximum_cached_response_size_exceeded { + let cached_stream = + futures_util::stream::once(async move { Ok(Bytes::from(response_body_buffer)) }); + let response_stream = response_body.into_data_stream(); + let chained_stream = cached_stream.chain(response_stream); + let stream_body = StreamBody::new(chained_stream.map_ok(Frame::data)); + let response_body = 
BodyExt::boxed(stream_body); + response_parts + .headers + .insert(CACHE_HEADER_NAME, HeaderValue::from_str("MISS")?); + let response = Response::from_parts(response_parts, response_body); + Ok(response) + } else { + let mut response_vary = match response_parts.headers.get(header::VARY) { + Some(value) => String::from_utf8_lossy(value.as_bytes()) + .split(",") + .map(|s| s.trim().to_owned()) + .collect(), + None => Vec::new(), + }; + + let mut processed_vary_orig = self.cache_vary_headers_configured.clone(); + processed_vary_orig.append(&mut response_vary); + + let processed_vary = processed_vary_orig + .iter() + .unique() + .map(|s| s.to_owned()) + .collect::>(); + + if !processed_vary.contains(&"*".to_string()) { + let cache_key_with_vary = format!( + "{}\n{}", + &cache_key, + processed_vary + .iter() + .map(|header_name| { + match self.request_headers.get(header_name) { + Some(header_value) => format!( + "{}: {}", + header_name, + String::from_utf8_lossy(header_value.as_bytes()).into_owned() + ), + None => "".to_string(), + } + }) + .collect::>() + .join("\n") + ); + + let mut rwlock_write = self.vary_cache.write().await; + rwlock_write.insert(cache_key.clone(), processed_vary); + drop(rwlock_write); + + let mut written_headers = response_parts.headers.clone(); + for header in self.cache_ignore_headers_configured.iter() { + while written_headers.remove(header).is_some() {} + } + + let mut rwlock_write = self.cache.write().await; + rwlock_write.retain(|_, (_, _, _, timestamp, response_cache_control)| { + let max_age = match response_cache_control { + Some(response_cache_control) => match response_cache_control.s_max_age { + Some(s_max_age) => Some(s_max_age), + None => response_cache_control.max_age, + }, + None => None, + }; + + timestamp.elapsed() <= max_age.unwrap_or(Duration::from_secs(DEFAULT_MAX_AGE)) + }); + + if let Some(maximum_cache_entries) = self.maximum_cache_entries { + // Remove a value at the front of the list + while !rwlock_write.is_empty() && 
rwlock_write.len() >= maximum_cache_entries { + rwlock_write.pop_front(); + } + } + + // This inserts a value at the back of the list + rwlock_write.insert( + cache_key_with_vary, + ( + response_parts.status, + written_headers, + response_body_buffer.clone(), + Instant::now(), + response_cache_control, + ), + ); + drop(rwlock_write); + } + + let cached_stream = + futures_util::stream::once(async move { Ok(Bytes::from(response_body_buffer)) }); + let stream_body = StreamBody::new(cached_stream.map_ok(Frame::data)); + let response_body = BodyExt::boxed(stream_body); + response_parts + .headers + .insert(CACHE_HEADER_NAME, HeaderValue::from_str("MISS")?); + let response = Response::from_parts(response_parts, response_body); + Ok(response) + } + } else { + response_parts + .headers + .insert(CACHE_HEADER_NAME, HeaderValue::from_str("MISS")?); + let response = Response::from_parts(response_parts, response_body); + Ok(response) + } + } else { + Ok(response) + } + }) + .await + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} diff --git a/ferron/src/optional_modules/cgi.rs b/ferron/src/optional_modules/cgi.rs new file mode 100644 index 0000000000000000000000000000000000000000..e39e1e138eed22ad4ef97e8fbefc5e123ed886e8 --- /dev/null +++ 
b/ferron/src/optional_modules/cgi.rs @@ -0,0 +1,859 @@ +// CGI handler code inspired by SVR.JS's RedBrick mod, translated from JavaScript to Rust. +use std::collections::HashMap; +use std::error::Error; +use std::path::{Path, PathBuf}; +use std::process::Stdio; +use std::sync::Arc; +use std::time::Duration; + +use crate::ferron_common::{ + ErrorLogger, HyperRequest, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use futures_util::TryStreamExt; +use hashlink::LinkedHashMap; +use http_body_util::{BodyExt, StreamBody}; +use httparse::EMPTY_HEADER; +use hyper::body::Frame; +use hyper::{header, Response, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use tokio::fs; +use tokio::io::AsyncReadExt; +use tokio::process::Command; +use tokio::runtime::Handle; +use tokio::sync::RwLock; +use tokio_util::io::{ReaderStream, StreamReader}; + +use crate::ferron_res::server_software::SERVER_SOFTWARE; +use crate::ferron_util::cgi_response::CgiResponse; +use crate::ferron_util::copy_move::Copier; +use crate::ferron_util::ttl_cache::TtlCache; + +pub fn server_module_init( + _config: &ServerConfig, +) -> Result, Box> { + let cache = Arc::new(RwLock::new(TtlCache::new(Duration::from_millis(100)))); + Ok(Box::new(CgiModule::new(cache))) +} + +#[allow(clippy::type_complexity)] +struct CgiModule { + path_cache: Arc, Option)>>>, +} + +impl CgiModule { + #[allow(clippy::type_complexity)] + fn new(path_cache: Arc, Option)>>>) -> Self { + Self { path_cache } + } +} + +impl ServerModule for CgiModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(CgiModuleHandlers { + path_cache: self.path_cache.clone(), + handle, + }) + } +} + +#[allow(clippy::type_complexity)] +struct CgiModuleHandlers { + handle: Handle, + path_cache: Arc, Option)>>>, +} + +#[async_trait] +impl ServerModuleHandlers for CgiModuleHandlers { + async fn 
request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let mut cgi_script_exts = Vec::new(); + + let cgi_script_exts_yaml = &config["cgiScriptExtensions"]; + if let Some(cgi_script_exts_obtained) = cgi_script_exts_yaml.as_vec() { + for cgi_script_ext_yaml in cgi_script_exts_obtained.iter() { + if let Some(cgi_script_ext) = cgi_script_ext_yaml.as_str() { + cgi_script_exts.push(cgi_script_ext); + } + } + } + + if let Some(wwwroot) = config["wwwroot"].as_str() { + let hyper_request = request.get_hyper_request(); + + let request_path = hyper_request.uri().path(); + let mut request_path_bytes = request_path.bytes(); + if request_path_bytes.len() < 1 || request_path_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + let cache_key = format!( + "{}{}{}", + match config["ip"].as_str() { + Some(ip) => format!("{}-", ip), + None => String::from(""), + }, + match config["domain"].as_str() { + Some(domain) => format!("{}-", domain), + None => String::from(""), + }, + request_path + ); + + let wwwroot_unknown = PathBuf::from(wwwroot); + let wwwroot_pathbuf = match wwwroot_unknown.as_path().is_absolute() { + true => wwwroot_unknown, + false => match fs::canonicalize(&wwwroot_unknown).await { + Ok(pathbuf) => pathbuf, + Err(_) => wwwroot_unknown, + }, + }; + let wwwroot = wwwroot_pathbuf.as_path(); + + let read_rwlock = self.path_cache.read().await; + let (execute_pathbuf, execute_path_info) = match read_rwlock.get(&cache_key) { + Some(data) => { + drop(read_rwlock); + data + } + None => { + drop(read_rwlock); + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) 
=> path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let joined_pathbuf = wwwroot.join(decoded_relative_path); + let mut execute_pathbuf: Option = None; + let mut execute_path_info: Option = None; + + match fs::metadata(&joined_pathbuf).await { + Ok(metadata) => { + if metadata.is_file() { + let mut request_path_normalized = match cfg!(windows) { + true => request_path.to_lowercase(), + false => request_path.to_string(), + }; + while request_path_normalized.contains("//") { + request_path_normalized = request_path_normalized.replace("//", "/"); + } + if request_path_normalized == "/cgi-bin" + || request_path_normalized.starts_with("/cgi-bin/") + { + execute_pathbuf = Some(joined_pathbuf); + } else { + let contained_extension = joined_pathbuf + .extension() + .map(|a| format!(".{}", a.to_string_lossy())); + if let Some(contained_extension) = contained_extension { + if cgi_script_exts.contains(&(&contained_extension as &str)) { + execute_pathbuf = Some(joined_pathbuf); + } + } + } + } else if metadata.is_dir() { + let indexes = vec!["index.php", "index.cgi"]; + for index in indexes { + let temp_joined_pathbuf = joined_pathbuf.join(index); + match fs::metadata(&temp_joined_pathbuf).await { + Ok(temp_metadata) => { + if temp_metadata.is_file() { + let request_path_normalized = match cfg!(windows) { + true => request_path.to_lowercase(), + false => request_path.to_string(), + }; + if request_path_normalized == "/cgi-bin" + || request_path_normalized.starts_with("/cgi-bin/") + { + execute_pathbuf = Some(temp_joined_pathbuf); + break; + } else { + let contained_extension = temp_joined_pathbuf + .extension() + .map(|a| format!(".{}", a.to_string_lossy())); + if let Some(contained_extension) = contained_extension { + if cgi_script_exts.contains(&(&contained_extension as &str)) { + execute_pathbuf = Some(temp_joined_pathbuf); + break; + } + } + } + } + } + Err(_) => continue, + }; + 
} + } + } + Err(err) => { + if err.kind() == tokio::io::ErrorKind::NotADirectory { + // TODO: find a file + let mut temp_pathbuf = joined_pathbuf.clone(); + loop { + if !temp_pathbuf.pop() { + break; + } + match fs::metadata(&temp_pathbuf).await { + Ok(metadata) => { + if metadata.is_file() { + let temp_path = temp_pathbuf.as_path(); + if !temp_path.starts_with(wwwroot) { + // Traversed above the webroot, so ignore that. + break; + } + let path_info = match joined_pathbuf.as_path().strip_prefix(temp_path) { + Ok(path) => { + let path = path.to_string_lossy().to_string(); + Some(match cfg!(windows) { + true => path.replace("\\", "/"), + false => path, + }) + } + Err(_) => None, + }; + let mut request_path_normalized = match cfg!(windows) { + true => request_path.to_lowercase(), + false => request_path.to_string(), + }; + while request_path_normalized.contains("//") { + request_path_normalized = request_path_normalized.replace("//", "/"); + } + if request_path_normalized == "/cgi-bin" + || request_path_normalized.starts_with("/cgi-bin/") + { + execute_pathbuf = Some(temp_pathbuf); + execute_path_info = path_info; + break; + } else { + let contained_extension = temp_pathbuf + .extension() + .map(|a| format!(".{}", a.to_string_lossy())); + if let Some(contained_extension) = contained_extension { + if cgi_script_exts.contains(&(&contained_extension as &str)) { + execute_pathbuf = Some(temp_pathbuf); + execute_path_info = path_info; + break; + } + } + } + } else { + break; + } + } + Err(err) => match err.kind() { + tokio::io::ErrorKind::NotADirectory => (), + _ => break, + }, + }; + } + } + } + }; + let data = (execute_pathbuf, execute_path_info); + + let mut write_rwlock = self.path_cache.write().await; + write_rwlock.cleanup(); + write_rwlock.insert(cache_key, data.clone()); + drop(write_rwlock); + data + } + }; + + if let Some(execute_pathbuf) = execute_pathbuf { + let mut cgi_interpreters = HashMap::new(); + cgi_interpreters.insert(".pl".to_string(), 
vec!["perl".to_string()]); + cgi_interpreters.insert(".py".to_string(), vec!["python".to_string()]); + cgi_interpreters.insert(".sh".to_string(), vec!["bash".to_string()]); + cgi_interpreters.insert(".ksh".to_string(), vec!["ksh".to_string()]); + cgi_interpreters.insert(".csh".to_string(), vec!["csh".to_string()]); + cgi_interpreters.insert(".rb".to_string(), vec!["ruby".to_string()]); + cgi_interpreters.insert(".php".to_string(), vec!["php-cgi".to_string()]); + if cfg!(windows) { + cgi_interpreters.insert(".exe".to_string(), vec![]); + cgi_interpreters.insert( + ".bat".to_string(), + vec!["cmd".to_string(), "/c".to_string()], + ); + cgi_interpreters.insert(".vbs".to_string(), vec!["cscript".to_string()]); + } + + let cgi_interpreters_yaml = &config["cgiScriptInterpreters"]; + if let Some(cgi_interpreters_hashmap) = cgi_interpreters_yaml.as_hash() { + for (key_yaml, value_yaml) in cgi_interpreters_hashmap.iter() { + if let Some(key) = key_yaml.as_str() { + if value_yaml.is_null() { + cgi_interpreters.remove(key); + } else if let Some(value) = value_yaml.as_vec() { + let mut params = Vec::new(); + for param_yaml in value.iter() { + if let Some(param) = param_yaml.as_str() { + params.push(param.to_string()); + } + } + cgi_interpreters.insert(key.to_string(), params); + } + } + } + } + + return execute_cgi_with_environment_variables( + request, + socket_data, + error_logger, + wwwroot, + execute_pathbuf, + execute_path_info, + config["serverAdministratorEmail"].as_str(), + cgi_interpreters, + ) + .await; + } + } + + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut 
self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} + +#[allow(clippy::too_many_arguments)] +async fn execute_cgi_with_environment_variables( + request: RequestData, + socket_data: &SocketData, + error_logger: &ErrorLogger, + wwwroot: &Path, + execute_pathbuf: PathBuf, + path_info: Option, + server_administrator_email: Option<&str>, + cgi_interpreters: HashMap>, +) -> Result> { + let mut environment_variables: LinkedHashMap = LinkedHashMap::new(); + + let hyper_request = request.get_hyper_request(); + let original_request_uri = request.get_original_url().unwrap_or(hyper_request.uri()); + + if let Some(auth_user) = request.get_auth_user() { + if let Some(authorization) = hyper_request.headers().get(header::AUTHORIZATION) { + let authorization_value = String::from_utf8_lossy(authorization.as_bytes()).to_string(); + let mut authorization_value_split = authorization_value.split(" "); + if let Some(authorization_type) = authorization_value_split.next() { + environment_variables.insert("AUTH_TYPE".to_string(), authorization_type.to_string()); + } + } + environment_variables.insert("REMOTE_USER".to_string(), auth_user.to_string()); + } + + environment_variables.insert( + "QUERY_STRING".to_string(), + match hyper_request.uri().query() { + Some(query) => query.to_string(), + None => "".to_string(), + }, + ); + + 
environment_variables.insert("SERVER_SOFTWARE".to_string(), SERVER_SOFTWARE.to_string()); + environment_variables.insert( + "SERVER_PROTOCOL".to_string(), + match hyper_request.version() { + hyper::Version::HTTP_09 => "HTTP/0.9".to_string(), + hyper::Version::HTTP_10 => "HTTP/1.0".to_string(), + hyper::Version::HTTP_11 => "HTTP/1.1".to_string(), + hyper::Version::HTTP_2 => "HTTP/2.0".to_string(), + hyper::Version::HTTP_3 => "HTTP/3.0".to_string(), + _ => "HTTP/Unknown".to_string(), + }, + ); + environment_variables.insert( + "SERVER_PORT".to_string(), + socket_data.local_addr.port().to_string(), + ); + environment_variables.insert( + "SERVER_ADDR".to_string(), + socket_data.local_addr.ip().to_canonical().to_string(), + ); + if let Some(server_administrator_email) = server_administrator_email { + environment_variables.insert( + "SERVER_ADMIN".to_string(), + server_administrator_email.to_string(), + ); + } + if let Some(host) = hyper_request.headers().get(header::HOST) { + environment_variables.insert( + "SERVER_NAME".to_string(), + String::from_utf8_lossy(host.as_bytes()).to_string(), + ); + } + + environment_variables.insert( + "DOCUMENT_ROOT".to_string(), + wwwroot.to_string_lossy().to_string(), + ); + environment_variables.insert( + "PATH_INFO".to_string(), + match &path_info { + Some(path_info) => format!("/{}", path_info), + None => "".to_string(), + }, + ); + environment_variables.insert( + "PATH_TRANSLATED".to_string(), + match &path_info { + Some(path_info) => { + let mut path_translated = execute_pathbuf.clone(); + path_translated.push(path_info); + path_translated.to_string_lossy().to_string() + } + None => "".to_string(), + }, + ); + environment_variables.insert( + "REQUEST_METHOD".to_string(), + hyper_request.method().to_string(), + ); + environment_variables.insert("GATEWAY_INTERFACE".to_string(), "CGI/1.1".to_string()); + environment_variables.insert( + "REQUEST_URI".to_string(), + format!( + "{}{}", + original_request_uri.path(), + match 
original_request_uri.query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ), + ); + + environment_variables.insert( + "REMOTE_PORT".to_string(), + socket_data.remote_addr.port().to_string(), + ); + environment_variables.insert( + "REMOTE_ADDR".to_string(), + socket_data.remote_addr.ip().to_canonical().to_string(), + ); + + environment_variables.insert( + "SCRIPT_FILENAME".to_string(), + execute_pathbuf.to_string_lossy().to_string(), + ); + if let Ok(script_path) = execute_pathbuf.as_path().strip_prefix(wwwroot) { + environment_variables.insert( + "SCRIPT_NAME".to_string(), + format!( + "/{}", + match cfg!(windows) { + true => script_path.to_string_lossy().to_string().replace("\\", "/"), + false => script_path.to_string_lossy().to_string(), + } + ), + ); + } + + if socket_data.encrypted { + environment_variables.insert("HTTPS".to_string(), "ON".to_string()); + } + + for (header_name, header_value) in hyper_request.headers().iter() { + let env_header_name = match *header_name { + header::CONTENT_LENGTH => "CONTENT_LENGTH".to_string(), + header::CONTENT_TYPE => "CONTENT_TYPE".to_string(), + _ => { + let mut result = String::new(); + + result.push_str("HTTP_"); + + for c in header_name.as_str().to_uppercase().chars() { + if c.is_alphanumeric() { + result.push(c); + } else { + result.push('_'); + } + } + + result + } + }; + if environment_variables.contains_key(&env_header_name) { + let value = environment_variables.get_mut(&env_header_name); + if let Some(value) = value { + if env_header_name == "HTTP_COOKIE" { + value.push_str("; "); + } else { + // See https://stackoverflow.com/a/1801191 + value.push_str(", "); + } + value.push_str(String::from_utf8_lossy(header_value.as_bytes()).as_ref()); + } else { + environment_variables.insert( + env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } else { + environment_variables.insert( + env_header_name, + 
String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } + + let (hyper_request, _, _) = request.into_parts(); + + execute_cgi( + hyper_request, + error_logger, + execute_pathbuf, + cgi_interpreters, + environment_variables, + ) + .await +} + +async fn execute_cgi( + hyper_request: HyperRequest, + error_logger: &ErrorLogger, + execute_pathbuf: PathBuf, + cgi_interpreters: HashMap>, + environment_variables: LinkedHashMap, +) -> Result> { + let (_, body) = hyper_request.into_parts(); + + let executable_params = match get_executable(&execute_pathbuf).await { + Ok(params) => params, + Err(err) => { + let contained_extension = execute_pathbuf + .extension() + .map(|a| format!(".{}", a.to_string_lossy())); + if let Some(contained_extension) = contained_extension { + if let Some(params_init) = cgi_interpreters.get(&contained_extension) { + let mut params: Vec = params_init.iter().map(|s| s.to_owned()).collect(); + params.push(execute_pathbuf.to_string_lossy().to_string()); + params + } else { + Err(err)? + } + } else { + Err(err)? 
+ } + } + }; + + let mut executable_params_iter = executable_params.iter(); + + let mut command = Command::new(match executable_params_iter.next() { + Some(executable_name) => executable_name, + None => Err(anyhow::anyhow!("Cannot determine the executable"))?, + }); + + // Set standard I/O to be piped + command.stdin(Stdio::piped()); + command.stdout(Stdio::piped()); + command.stderr(Stdio::piped()); + + for param in executable_params_iter { + command.arg(param); + } + + command.envs(environment_variables); + + let mut execute_dir_pathbuf = execute_pathbuf.clone(); + execute_dir_pathbuf.pop(); + command.current_dir(execute_dir_pathbuf); + + let mut child = command.spawn()?; + + let cgi_stdin_reader = StreamReader::new(body.into_data_stream().map_err(std::io::Error::other)); + + let stdin = match child.stdin.take() { + Some(stdin) => stdin, + None => Err(anyhow::anyhow!( + "The CGI process doesn't have standard input" + ))?, + }; + let stdout = match child.stdout.take() { + Some(stdout) => stdout, + None => Err(anyhow::anyhow!( + "The CGI process doesn't have standard output" + ))?, + }; + let stderr = child.stderr.take(); + + let mut cgi_response = CgiResponse::new(stdout); + + let stdin_copy_future = Copier::new(cgi_stdin_reader, stdin).copy(); + let mut stdin_copy_future_pinned = Box::pin(stdin_copy_future); + + let mut headers = [EMPTY_HEADER; 128]; + + let mut early_stdin_copied = false; + + // Needed to wrap this in another scope to prevent errors with multiple mutable borrows. + { + let mut head_obtained = false; + let stdout_parse_future = cgi_response.get_head(); + tokio::pin!(stdout_parse_future); + + // Cannot use a loop with tokio::select, since stdin_copy_future_pinned being constantly ready will make the web server stop responding to HTTP requests + tokio::select! 
{ + biased; + + obtained_head = &mut stdout_parse_future => { + let obtained_head = obtained_head?; + if !obtained_head.is_empty() { + httparse::parse_headers(obtained_head, &mut headers)?; + } + head_obtained = true; + }, + result = &mut stdin_copy_future_pinned => { + early_stdin_copied = true; + result?; + } + } + + if !head_obtained { + // Kept it same as in the tokio::select macro + let obtained_head = stdout_parse_future.await?; + if !obtained_head.is_empty() { + httparse::parse_headers(obtained_head, &mut headers)?; + } + } + } + + let mut response_builder = Response::builder(); + let mut status_code = 200; + for header in headers { + if header == EMPTY_HEADER { + break; + } + let mut is_status_header = false; + match &header.name.to_lowercase() as &str { + "location" => { + if !(300..=399).contains(&status_code) { + status_code = 302; + } + } + "status" => { + is_status_header = true; + let header_value_cow = String::from_utf8_lossy(header.value); + let mut split_status = header_value_cow.split(" "); + let first_part = split_status.next(); + if let Some(first_part) = first_part { + if first_part.starts_with("HTTP/") { + let second_part = split_status.next(); + if let Some(second_part) = second_part { + if let Ok(parsed_status_code) = second_part.parse::() { + status_code = parsed_status_code; + } + } + } else if let Ok(parsed_status_code) = first_part.parse::() { + status_code = parsed_status_code; + } + } + } + _ => (), + } + if !is_status_header { + response_builder = response_builder.header(header.name, header.value); + } + } + + response_builder = response_builder.status(status_code); + + let reader_stream = ReaderStream::new(cgi_response); + let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data)); + let boxed_body = stream_body.boxed(); + + let response = response_builder.body(boxed_body)?; + + if let Some(exit_code) = child.try_wait()? 
{ + if !exit_code.success() { + if let Some(mut stderr) = stderr { + let mut stderr_string = String::new(); + stderr + .read_to_string(&mut stderr_string) + .await + .unwrap_or_default(); + let stderr_string_trimmed = stderr_string.trim(); + if !stderr_string_trimmed.is_empty() { + error_logger + .log(&format!("There were CGI errors: {}", stderr_string_trimmed)) + .await; + } + } + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .build(), + ); + } + } + + let error_logger = error_logger.clone(); + + Ok( + ResponseData::builder_without_request() + .response(response) + .parallel_fn(async move { + if !early_stdin_copied { + stdin_copy_future_pinned.await.unwrap_or_default(); + } + + if let Some(mut stderr) = stderr { + let mut stderr_string = String::new(); + stderr + .read_to_string(&mut stderr_string) + .await + .unwrap_or_default(); + let stderr_string_trimmed = stderr_string.trim(); + if !stderr_string_trimmed.is_empty() { + error_logger + .log(&format!("There were CGI errors: {}", stderr_string_trimmed)) + .await; + } + } + }) + .build(), + ) +} + +#[allow(dead_code)] +#[cfg(unix)] +async fn get_executable( + execute_pathbuf: &PathBuf, +) -> Result, Box> { + use std::os::unix::fs::PermissionsExt; + + let metadata = fs::metadata(&execute_pathbuf).await?; + let permissions = metadata.permissions(); + let is_executable = permissions.mode() & 0o111 != 0; + + if !is_executable { + Err(anyhow::anyhow!("The CGI program is not executable"))? 
+ } + + let executable_params_vector = vec![execute_pathbuf.to_string_lossy().to_string()]; + Ok(executable_params_vector) +} + +#[allow(dead_code)] +#[cfg(not(unix))] +async fn get_executable( + execute_pathbuf: &PathBuf, +) -> Result, Box> { + use tokio::io::{AsyncBufReadExt, AsyncSeekExt, BufReader}; + + let mut magic_signature_buffer = [0u8; 2]; + let mut open_file = fs::File::open(&execute_pathbuf).await?; + if open_file + .read_exact(&mut magic_signature_buffer) + .await + .is_err() + { + Err(anyhow::anyhow!("Failed to read the CGI program signature"))? + } + + match &magic_signature_buffer { + b"PE" => { + // Windows executables + let executable_params_vector = vec![execute_pathbuf.to_string_lossy().to_string()]; + Ok(executable_params_vector) + } + b"#!" => { + // Scripts with a shebang line + open_file.rewind().await?; + let mut buffered_file = BufReader::new(open_file); + let mut shebang_line = String::new(); + buffered_file.read_line(&mut shebang_line).await?; + + let mut command_begin: Vec = (&shebang_line[2..]) + .replace("\r", "") + .replace("\n", "") + .split(" ") + .map(|s| s.to_owned()) + .collect(); + command_begin.push(execute_pathbuf.to_string_lossy().to_string()); + Ok(command_begin) + } + _ => { + // It's not executable + Err(anyhow::anyhow!("The CGI program is not executable"))? 
+ } + } +} diff --git a/ferron/src/optional_modules/example.rs b/ferron/src/optional_modules/example.rs new file mode 100644 index 0000000000000000000000000000000000000000..9ac2519d297d6eaf004cef5ac50401d09b8bb0b8 --- /dev/null +++ b/ferron/src/optional_modules/example.rs @@ -0,0 +1,143 @@ +use std::error::Error; + +use crate::ferron_common::{ + ErrorLogger, HyperUpgraded, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperResponse, WithRuntime}; +use async_trait::async_trait; +use http_body_util::{BodyExt, Full}; +use hyper::Response; +use hyper_tungstenite::HyperWebsocket; +use tokio::runtime::Handle; + +// Define a struct for the module implementation +struct ExampleModule; + +/// Initializes the server module and returns an instance of `ExampleModule`. +pub fn server_module_init( + _config: &ServerConfig, // This is YAML configuration parsed as-is. +) -> Result, Box> { + Ok(Box::new(ExampleModule::new())) +} + +impl ExampleModule { + /// Creates a new instance of `ExampleModule`. + fn new() -> Self { + ExampleModule + } +} + +/// Implements the `ServerModule` trait for `ExampleModule`. +impl ServerModule for ExampleModule { + /// Returns an instance of `ExampleModuleHandlers` to handle HTTP requests. + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(ExampleModuleHandlers { handle }) + } +} + +// Define a struct to handle HTTP requests +struct ExampleModuleHandlers { + handle: Handle, +} + +/// Implements the `ServerModuleHandlers` trait for `ExampleModuleHandlers`. +#[async_trait] +impl ServerModuleHandlers for ExampleModuleHandlers { + /// Handles incoming HTTP requests. + /// If the request path is `/hello`, it responds with "Hello World!". 
+ async fn request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + if request.get_hyper_request().uri().path() == "/hello" { + Ok( + ResponseData::builder(request) + .response( + Response::builder().body( + Full::new("Hello World!".into()) + .map_err(|e| match e {}) + .boxed(), + )?, + ) + .build(), + ) + } else { + Ok(ResponseData::builder(request).build()) + } + }) + .await + } + + /// Handles non-CONNECT proxy requests (not used in this module). + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + // No proxy request handling needed. + Ok(ResponseData::builder(request).build()) + } + + /// Modifies outgoing responses (not used in this module). + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + // No response modification needed. + Ok(response) + } + + /// Modifies outgoing proxy responses (not used in this module). + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + // No proxy response modification needed. + Ok(response) + } + + /// Handles CONNECT proxy requests (not used in this module). + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + // No proxy request handling needed. + Ok(()) + } + + /// Checks if the module is a forward proxy module utilizing CONNECT method. + fn does_connect_proxy_requests(&mut self) -> bool { + // This is not a forward proxy module utilizing CONNECT method + false + } + + /// Handles WebSocket requests (not used in this module). 
+ async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + // No proxy request handling needed. + Ok(()) + } + + /// Checks if the module supports WebSocket connections. + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + // This module doesn't support WebSocket connections. + false + } +} diff --git a/ferron/src/optional_modules/fauth.rs b/ferron/src/optional_modules/fauth.rs new file mode 100644 index 0000000000000000000000000000000000000000..79a8f60d190f482b66502a2fb5d1c983ed15cc04 --- /dev/null +++ b/ferron/src/optional_modules/fauth.rs @@ -0,0 +1,572 @@ +// The "fauth" module is derived from the "rproxy" module, and inspired by Traefik's ForwardAuth middleware. + +use std::collections::HashMap; +use std::error::Error; +use std::str::FromStr; +use std::sync::Arc; + +use crate::ferron_common::{ + ErrorLogger, HyperUpgraded, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperResponse, WithRuntime}; +use async_trait::async_trait; +use http_body_util::combinators::BoxBody; +use http_body_util::{BodyExt, Empty}; +use hyper::body::Bytes; +use hyper::client::conn::http1::SendRequest; +use hyper::header::HeaderName; +use hyper::{header, Method, Request, StatusCode, Uri}; +use hyper_tungstenite::HyperWebsocket; +use hyper_util::rt::TokioIo; +use rustls::pki_types::ServerName; +use rustls::RootCertStore; +use rustls_native_certs::load_native_certs; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::TcpStream; +use tokio::runtime::Handle; +use tokio::sync::RwLock; +use tokio_rustls::TlsConnector; + +const DEFAULT_CONCURRENT_CONNECTIONS_PER_HOST: u32 = 32; + +pub fn server_module_init( + _config: &ServerConfig, +) -> Result, Box> { + let mut roots: RootCertStore = RootCertStore::empty(); 
+ let certs_result = load_native_certs(); + if !certs_result.errors.is_empty() { + Err(anyhow::anyhow!(format!( + "Couldn't load the native certificate store: {}", + certs_result.errors[0] + )))? + } + let certs = certs_result.certs; + + for cert in certs { + match roots.add(cert) { + Ok(_) => (), + Err(err) => Err(anyhow::anyhow!(format!( + "Couldn't add a certificate to the certificate store: {}", + err + )))?, + } + } + + let mut connections_vec = Vec::new(); + for _ in 0..DEFAULT_CONCURRENT_CONNECTIONS_PER_HOST { + connections_vec.push(RwLock::new(HashMap::new())); + } + Ok(Box::new(ForwardedAuthenticationModule::new( + Arc::new(roots), + Arc::new(connections_vec), + ))) +} + +#[allow(clippy::type_complexity)] +struct ForwardedAuthenticationModule { + roots: Arc, + connections: Arc>>>>>, +} + +impl ForwardedAuthenticationModule { + #[allow(clippy::type_complexity)] + fn new( + roots: Arc, + connections: Arc>>>>>, + ) -> Self { + Self { roots, connections } + } +} + +impl ServerModule for ForwardedAuthenticationModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(ForwardedAuthenticationModuleHandlers { + roots: self.roots.clone(), + connections: self.connections.clone(), + handle, + }) + } +} + +#[allow(clippy::type_complexity)] +struct ForwardedAuthenticationModuleHandlers { + handle: Handle, + roots: Arc, + connections: Arc>>>>>, +} + +#[async_trait] +impl ServerModuleHandlers for ForwardedAuthenticationModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let mut auth_to = None; + + if let Some(auth_to_str) = config["authTo"].as_str() { + auth_to = Some(auth_to_str.to_string()); + } + + let forwarded_auth_copy_headers = match config["forwardedAuthCopyHeaders"].as_vec() { + Some(vector) => { + let mut new_vector = Vec::new(); + for yaml_value in vector.iter() { + 
if let Some(str_value) = yaml_value.as_str() { + new_vector.push(str_value.to_string()); + } + } + new_vector + } + None => Vec::new(), + }; + + if let Some(auth_to) = auth_to { + let (hyper_request, auth_user, original_url) = request.into_parts(); + let (hyper_request_parts, request_body) = hyper_request.into_parts(); + + let auth_request_url = auth_to.parse::()?; + let scheme_str = auth_request_url.scheme_str(); + let mut encrypted = false; + + match scheme_str { + Some("http") => { + encrypted = false; + } + Some("https") => { + encrypted = true; + } + _ => Err(anyhow::anyhow!( + "Only HTTP and HTTPS reverse proxy URLs are supported." + ))?, + }; + + let host = match auth_request_url.host() { + Some(host) => host, + None => Err(anyhow::anyhow!( + "The reverse proxy URL doesn't include the host" + ))?, + }; + + let port = auth_request_url.port_u16().unwrap_or(match scheme_str { + Some("http") => 80, + Some("https") => 443, + _ => 80, + }); + + let addr = format!("{}:{}", host, port); + let authority = auth_request_url.authority().cloned(); + + let hyper_request_path = hyper_request_parts.uri.path(); + + let path_and_query = format!( + "{}{}", + hyper_request_path, + match hyper_request_parts.uri.query() { + Some(query) => format!("?{}", query), + None => "".to_string(), + } + ); + + let mut auth_hyper_request_parts = hyper_request_parts.clone(); + + auth_hyper_request_parts.uri = Uri::from_str(&format!( + "{}{}", + auth_request_url.path(), + match auth_request_url.query() { + Some(query) => format!("?{}", query), + None => "".to_string(), + } + ))?; + + let original_host = hyper_request_parts.headers.get(header::HOST).cloned(); + + // Host header for host identification + match authority { + Some(authority) => { + auth_hyper_request_parts + .headers + .insert(header::HOST, authority.to_string().parse()?); + } + None => { + auth_hyper_request_parts.headers.remove(header::HOST); + } + } + + // Connection header to enable HTTP/1.1 keep-alive + 
auth_hyper_request_parts + .headers + .insert(header::CONNECTION, "keep-alive".parse()?); + + // X-Forwarded-* headers to send the client's data to a forwarded authentication server + auth_hyper_request_parts.headers.insert( + "x-forwarded-for", + socket_data + .remote_addr + .ip() + .to_canonical() + .to_string() + .parse()?, + ); + + if socket_data.encrypted { + auth_hyper_request_parts + .headers + .insert("x-forwarded-proto", "https".parse()?); + } else { + auth_hyper_request_parts + .headers + .insert("x-forwarded-proto", "http".parse()?); + } + + if let Some(original_host) = original_host { + auth_hyper_request_parts + .headers + .insert("x-forwarded-host", original_host); + } + + auth_hyper_request_parts + .headers + .insert("x-forwarded-uri", path_and_query.parse()?); + + auth_hyper_request_parts.headers.insert( + "x-forwarded-method", + hyper_request_parts.method.as_str().parse()?, + ); + + auth_hyper_request_parts.method = Method::GET; + + let auth_request = Request::from_parts( + auth_hyper_request_parts, + Empty::new().map_err(|e| match e {}).boxed(), + ); + let original_hyper_request = Request::from_parts(hyper_request_parts, request_body); + let original_request = RequestData::new(original_hyper_request, auth_user, original_url); + + let connections = &self.connections[rand::random_range(..self.connections.len())]; + + let rwlock_read = connections.read().await; + let sender_read_option = rwlock_read.get(&addr); + + if let Some(sender_read) = sender_read_option { + if !sender_read.is_closed() { + drop(rwlock_read); + let mut rwlock_write = connections.write().await; + let sender_option = rwlock_write.get_mut(&addr); + + if let Some(sender) = sender_option { + if !sender.is_closed() { + let result = http_forwarded_auth_kept_alive( + sender, + auth_request, + error_logger, + original_request, + forwarded_auth_copy_headers, + ) + .await; + drop(rwlock_write); + return result; + } else { + drop(rwlock_write); + } + } else { + drop(rwlock_write); + } + } 
else { + drop(rwlock_read); + } + } else { + drop(rwlock_read); + } + + let stream = match TcpStream::connect(&addr).await { + Ok(stream) => stream, + Err(err) => { + match err.kind() { + tokio::io::ErrorKind::ConnectionRefused + | tokio::io::ErrorKind::NotFound + | tokio::io::ErrorKind::HostUnreachable => { + error_logger + .log(&format!("Service unavailable: {}", err)) + .await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::SERVICE_UNAVAILABLE) + .build(), + ); + } + tokio::io::ErrorKind::TimedOut => { + error_logger.log(&format!("Gateway timeout: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::GATEWAY_TIMEOUT) + .build(), + ); + } + _ => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + } + }; + + match stream.set_nodelay(true) { + Ok(_) => (), + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + if !encrypted { + http_forwarded_auth( + connections, + addr, + stream, + auth_request, + error_logger, + original_request, + forwarded_auth_copy_headers, + ) + .await + } else { + let tls_client_config = rustls::ClientConfig::builder() + .with_root_certificates(self.roots.clone()) + .with_no_client_auth(); + let connector = TlsConnector::from(Arc::new(tls_client_config)); + let domain = ServerName::try_from(host)?.to_owned(); + + let tls_stream = match connector.connect(domain, stream).await { + Ok(stream) => stream, + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + http_forwarded_auth( + connections, + addr, + tls_stream, + auth_request, + error_logger, + original_request, + 
forwarded_auth_copy_headers, + ) + .await + } + } else { + Ok(ResponseData::builder(request).build()) + } + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} + +async fn http_forwarded_auth( + connections: &RwLock>>>, + connect_addr: String, + stream: impl AsyncRead + AsyncWrite + Send + Unpin + 'static, + proxy_request: Request>, + error_logger: &ErrorLogger, + mut original_request: RequestData, + forwarded_auth_copy_headers: Vec, +) -> Result> { + let io = TokioIo::new(stream); + + let (mut sender, conn) = match hyper::client::conn::http1::handshake(io).await { + Ok(data) => data, + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + let send_request = sender.send_request(proxy_request); + + let mut pinned_conn = Box::pin(conn); + tokio::pin!(send_request); 
+ + let response; + + loop { + tokio::select! { + biased; + + proxy_response = &mut send_request => { + let proxy_response = match proxy_response { + Ok(response) => response, + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok(ResponseData::builder_without_request().status(StatusCode::BAD_GATEWAY).build()); + } + }; + + if proxy_response.status().is_success() { + if !forwarded_auth_copy_headers.is_empty() { + let response_headers = proxy_response.headers(); + let request_headers = original_request.get_mut_hyper_request().headers_mut(); + for forwarded_auth_copy_header_string in forwarded_auth_copy_headers.iter() { + let forwarded_auth_copy_header= HeaderName::from_str(forwarded_auth_copy_header_string)?; + if response_headers.contains_key(&forwarded_auth_copy_header) { + while request_headers.remove(&forwarded_auth_copy_header).is_some() {} + for header_value in response_headers.get_all(&forwarded_auth_copy_header).iter() { + request_headers.append(&forwarded_auth_copy_header, header_value.clone()); + } + } + } + } + response = ResponseData::builder(original_request).build(); + } else { + response = ResponseData::builder_without_request() + .response(proxy_response.map(|b| { + b.map_err(|e| std::io::Error::other(e.to_string())) + .boxed() + })) + .parallel_fn(async move { + pinned_conn.await.unwrap_or_default(); + }) + .build(); + + } + + break; + }, + state = &mut pinned_conn => { + if state.is_err() { + error_logger.log("Bad gateway: incomplete response").await; + return Ok(ResponseData::builder_without_request().status(StatusCode::BAD_GATEWAY).build()); + } + }, + }; + } + + if !sender.is_closed() { + let mut rwlock_write = connections.write().await; + rwlock_write.insert(connect_addr, sender); + drop(rwlock_write); + } + + Ok(response) +} + +async fn http_forwarded_auth_kept_alive( + sender: &mut SendRequest>, + proxy_request: Request>, + error_logger: &ErrorLogger, + mut original_request: RequestData, + 
forwarded_auth_copy_headers: Vec, +) -> Result> { + let proxy_response = match sender.send_request(proxy_request).await { + Ok(response) => response, + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + let response = if proxy_response.status().is_success() { + if !forwarded_auth_copy_headers.is_empty() { + let response_headers = proxy_response.headers(); + let request_headers = original_request.get_mut_hyper_request().headers_mut(); + for forwarded_auth_copy_header_string in forwarded_auth_copy_headers.iter() { + let forwarded_auth_copy_header = HeaderName::from_str(forwarded_auth_copy_header_string)?; + if response_headers.contains_key(&forwarded_auth_copy_header) { + while request_headers + .remove(&forwarded_auth_copy_header) + .is_some() + {} + for header_value in response_headers.get_all(&forwarded_auth_copy_header).iter() { + request_headers.append(&forwarded_auth_copy_header, header_value.clone()); + } + } + } + } + ResponseData::builder(original_request).build() + } else { + ResponseData::builder_without_request() + .response(proxy_response.map(|b| b.map_err(|e| std::io::Error::other(e.to_string())).boxed())) + .build() + }; + + Ok(response) +} diff --git a/ferron/src/optional_modules/fcgi.rs b/ferron/src/optional_modules/fcgi.rs new file mode 100644 index 0000000000000000000000000000000000000000..f52821c459f2b7c650276a5855d78cef4bb0d401 --- /dev/null +++ b/ferron/src/optional_modules/fcgi.rs @@ -0,0 +1,964 @@ +// FastCGI handler code inspired by SVR.JS's GreenRhombus mod, translated from JavaScript to Rust. 
+// Based on the "cgi" and "scgi" module +use std::env; +use std::error::Error; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::Duration; + +use crate::ferron_common::{ + ErrorLogger, HyperRequest, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use futures_util::future::Either; +use futures_util::TryStreamExt; +use hashlink::LinkedHashMap; +use http_body_util::{BodyExt, StreamBody}; +use httparse::EMPTY_HEADER; +use hyper::body::{Bytes, Frame}; +use hyper::{header, Response, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use tokio::fs; +use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use tokio::net::TcpStream; +use tokio::runtime::Handle; +use tokio::sync::RwLock; +use tokio_util::codec::{FramedRead, FramedWrite}; +use tokio_util::io::{ReaderStream, SinkWriter, StreamReader}; + +use crate::ferron_res::server_software::SERVER_SOFTWARE; +use crate::ferron_util::cgi_response::CgiResponse; +use crate::ferron_util::copy_move::Copier; +use crate::ferron_util::fcgi_decoder::{FcgiDecodedData, FcgiDecoder}; +use crate::ferron_util::fcgi_encoder::FcgiEncoder; +use crate::ferron_util::fcgi_name_value_pair::construct_fastcgi_name_value_pair; +use crate::ferron_util::fcgi_record::construct_fastcgi_record; +use crate::ferron_util::read_to_end_move::ReadToEndFuture; +use crate::ferron_util::split_stream_by_map::SplitStreamByMapExt; +use crate::ferron_util::ttl_cache::TtlCache; + +pub fn server_module_init( + _config: &ServerConfig, +) -> Result, Box> { + let cache = Arc::new(RwLock::new(TtlCache::new(Duration::from_millis(100)))); + Ok(Box::new(FcgiModule::new(cache))) +} + +#[allow(clippy::type_complexity)] +struct FcgiModule { + path_cache: Arc, Option)>>>, +} + +impl FcgiModule { + #[allow(clippy::type_complexity)] + fn new(path_cache: Arc, Option)>>>) -> Self { + Self { path_cache } + } +} 
+ +impl ServerModule for FcgiModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(FcgiModuleHandlers { + path_cache: self.path_cache.clone(), + handle, + }) + } +} + +#[allow(clippy::type_complexity)] +struct FcgiModuleHandlers { + handle: Handle, + path_cache: Arc, Option)>>>, +} + +#[async_trait] +impl ServerModuleHandlers for FcgiModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let mut fastcgi_script_exts = Vec::new(); + + let fastcgi_script_exts_yaml = &config["fcgiScriptExtensions"]; + if let Some(fastcgi_script_exts_obtained) = fastcgi_script_exts_yaml.as_vec() { + for fastcgi_script_ext_yaml in fastcgi_script_exts_obtained.iter() { + if let Some(fastcgi_script_ext) = fastcgi_script_ext_yaml.as_str() { + fastcgi_script_exts.push(fastcgi_script_ext); + } + } + } + + let mut fastcgi_to = "tcp://localhost:4000/"; + let fastcgi_to_yaml = &config["fcgiTo"]; + if let Some(fastcgi_to_obtained) = fastcgi_to_yaml.as_str() { + fastcgi_to = fastcgi_to_obtained; + } + + let mut fastcgi_path = None; + if let Some(fastcgi_path_obtained) = config["fcgiPath"].as_str() { + fastcgi_path = Some(fastcgi_path_obtained.to_string()); + } + + let hyper_request = request.get_hyper_request(); + + let request_path = hyper_request.uri().path(); + let mut request_path_bytes = request_path.bytes(); + if request_path_bytes.len() < 1 || request_path_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + let mut execute_pathbuf = None; + let mut execute_path_info = None; + let mut wwwroot_detected = None; + + if let Some(fastcgi_path) = fastcgi_path { + let mut canonical_fastcgi_path: &str = &fastcgi_path; + if canonical_fastcgi_path.bytes().last() == Some(b'/') { + canonical_fastcgi_path = 
&canonical_fastcgi_path[..(canonical_fastcgi_path.len() - 1)]; + } + + let request_path_with_slashes = match request_path == canonical_fastcgi_path { + true => format!("{}/", request_path), + false => request_path.to_string(), + }; + if let Some(stripped_request_path) = + request_path_with_slashes.strip_prefix(canonical_fastcgi_path) + { + let wwwroot_yaml = &config["wwwroot"]; + let wwwroot = wwwroot_yaml.as_str().unwrap_or("/nonexistent"); + + let wwwroot_unknown = PathBuf::from(wwwroot); + let wwwroot_pathbuf = match wwwroot_unknown.as_path().is_absolute() { + true => wwwroot_unknown, + false => match fs::canonicalize(&wwwroot_unknown).await { + Ok(pathbuf) => pathbuf, + Err(_) => wwwroot_unknown, + }, + }; + wwwroot_detected = Some(wwwroot_pathbuf.clone()); + let wwwroot = wwwroot_pathbuf.as_path(); + + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let joined_pathbuf = wwwroot.join(decoded_relative_path); + execute_pathbuf = Some(joined_pathbuf); + execute_path_info = stripped_request_path + .strip_prefix("/") + .map(|s| s.to_string()); + } + } + + if execute_pathbuf.is_none() { + if let Some(wwwroot) = config["wwwroot"].as_str() { + let cache_key = format!( + "{}{}{}", + match config["ip"].as_str() { + Some(ip) => format!("{}-", ip), + None => String::from(""), + }, + match config["domain"].as_str() { + Some(domain) => format!("{}-", domain), + None => String::from(""), + }, + request_path + ); + + let wwwroot_unknown = PathBuf::from(wwwroot); + let wwwroot_pathbuf = match wwwroot_unknown.as_path().is_absolute() { + true => wwwroot_unknown, + false => match fs::canonicalize(&wwwroot_unknown).await { + Ok(pathbuf) => pathbuf, + 
Err(_) => wwwroot_unknown, + }, + }; + wwwroot_detected = Some(wwwroot_pathbuf.clone()); + let wwwroot = wwwroot_pathbuf.as_path(); + + let read_rwlock = self.path_cache.read().await; + let (execute_pathbuf_got, execute_path_info_got) = match read_rwlock.get(&cache_key) { + Some(data) => { + drop(read_rwlock); + data + } + None => { + drop(read_rwlock); + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let joined_pathbuf = wwwroot.join(decoded_relative_path); + let mut execute_pathbuf: Option = None; + let mut execute_path_info: Option = None; + + match fs::metadata(&joined_pathbuf).await { + Ok(metadata) => { + if metadata.is_file() { + let contained_extension = joined_pathbuf + .extension() + .map(|a| format!(".{}", a.to_string_lossy())); + if let Some(contained_extension) = contained_extension { + if fastcgi_script_exts.contains(&(&contained_extension as &str)) { + execute_pathbuf = Some(joined_pathbuf); + } + } + } else if metadata.is_dir() { + let indexes = vec!["index.php", "index.cgi"]; + for index in indexes { + let temp_joined_pathbuf = joined_pathbuf.join(index); + match fs::metadata(&temp_joined_pathbuf).await { + Ok(temp_metadata) => { + if temp_metadata.is_file() { + let contained_extension = temp_joined_pathbuf + .extension() + .map(|a| format!(".{}", a.to_string_lossy())); + if let Some(contained_extension) = contained_extension { + if fastcgi_script_exts.contains(&(&contained_extension as &str)) { + execute_pathbuf = Some(temp_joined_pathbuf); + break; + } + } + } + } + Err(_) => continue, + }; + } + } + } + Err(err) => { + if err.kind() == tokio::io::ErrorKind::NotADirectory { + // TODO: find a file + let mut 
temp_pathbuf = joined_pathbuf.clone(); + loop { + if !temp_pathbuf.pop() { + break; + } + match fs::metadata(&temp_pathbuf).await { + Ok(metadata) => { + if metadata.is_file() { + let temp_path = temp_pathbuf.as_path(); + if !temp_path.starts_with(wwwroot) { + // Traversed above the webroot, so ignore that. + break; + } + let path_info = match joined_pathbuf.as_path().strip_prefix(temp_path) { + Ok(path) => { + let path = path.to_string_lossy().to_string(); + Some(match cfg!(windows) { + true => path.replace("\\", "/"), + false => path, + }) + } + Err(_) => None, + }; + let mut request_path_normalized = match cfg!(windows) { + true => request_path.to_lowercase(), + false => request_path.to_string(), + }; + while request_path_normalized.contains("//") { + request_path_normalized = request_path_normalized.replace("//", "/"); + } + if request_path_normalized == "/cgi-bin" + || request_path_normalized.starts_with("/cgi-bin/") + { + execute_pathbuf = Some(temp_pathbuf); + execute_path_info = path_info; + break; + } else { + let contained_extension = temp_pathbuf + .extension() + .map(|a| format!(".{}", a.to_string_lossy())); + if let Some(contained_extension) = contained_extension { + if fastcgi_script_exts.contains(&(&contained_extension as &str)) { + execute_pathbuf = Some(temp_pathbuf); + execute_path_info = path_info; + break; + } + } + } + } else { + break; + } + } + Err(err) => match err.kind() { + tokio::io::ErrorKind::NotADirectory => (), + _ => break, + }, + }; + } + } + } + }; + let data = (execute_pathbuf, execute_path_info); + + let mut write_rwlock = self.path_cache.write().await; + write_rwlock.cleanup(); + write_rwlock.insert(cache_key, data.clone()); + drop(write_rwlock); + data + } + }; + + execute_pathbuf = execute_pathbuf_got; + execute_path_info = execute_path_info_got; + } + } + + if let Some(execute_pathbuf) = execute_pathbuf { + if let Some(wwwroot_detected) = wwwroot_detected { + return execute_fastcgi_with_environment_variables( + request, + 
socket_data, + error_logger, + wwwroot_detected.as_path(), + execute_pathbuf, + execute_path_info, + config["serverAdministratorEmail"].as_str(), + fastcgi_to, + ) + .await; + } + } + + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} + +#[allow(clippy::too_many_arguments)] +async fn execute_fastcgi_with_environment_variables( + request: RequestData, + socket_data: &SocketData, + error_logger: &ErrorLogger, + wwwroot: &Path, + execute_pathbuf: PathBuf, + path_info: Option, + server_administrator_email: Option<&str>, + fastcgi_to: &str, +) -> Result> { + let mut environment_variables: LinkedHashMap = LinkedHashMap::new(); + + let hyper_request = request.get_hyper_request(); + let original_request_uri = request.get_original_url().unwrap_or(hyper_request.uri()); + + if let Some(auth_user) = request.get_auth_user() { + if let Some(authorization) = 
hyper_request.headers().get(header::AUTHORIZATION) { + let authorization_value = String::from_utf8_lossy(authorization.as_bytes()).to_string(); + let mut authorization_value_split = authorization_value.split(" "); + if let Some(authorization_type) = authorization_value_split.next() { + environment_variables.insert("AUTH_TYPE".to_string(), authorization_type.to_string()); + } + } + environment_variables.insert("REMOTE_USER".to_string(), auth_user.to_string()); + } + + environment_variables.insert( + "QUERY_STRING".to_string(), + match hyper_request.uri().query() { + Some(query) => query.to_string(), + None => "".to_string(), + }, + ); + + environment_variables.insert("SERVER_SOFTWARE".to_string(), SERVER_SOFTWARE.to_string()); + environment_variables.insert( + "SERVER_PROTOCOL".to_string(), + match hyper_request.version() { + hyper::Version::HTTP_09 => "HTTP/0.9".to_string(), + hyper::Version::HTTP_10 => "HTTP/1.0".to_string(), + hyper::Version::HTTP_11 => "HTTP/1.1".to_string(), + hyper::Version::HTTP_2 => "HTTP/2.0".to_string(), + hyper::Version::HTTP_3 => "HTTP/3.0".to_string(), + _ => "HTTP/Unknown".to_string(), + }, + ); + environment_variables.insert( + "SERVER_PORT".to_string(), + socket_data.local_addr.port().to_string(), + ); + environment_variables.insert( + "SERVER_ADDR".to_string(), + socket_data.local_addr.ip().to_canonical().to_string(), + ); + if let Some(server_administrator_email) = server_administrator_email { + environment_variables.insert( + "SERVER_ADMIN".to_string(), + server_administrator_email.to_string(), + ); + } + if let Some(host) = hyper_request.headers().get(header::HOST) { + environment_variables.insert( + "SERVER_NAME".to_string(), + String::from_utf8_lossy(host.as_bytes()).to_string(), + ); + } + + environment_variables.insert( + "DOCUMENT_ROOT".to_string(), + wwwroot.to_string_lossy().to_string(), + ); + environment_variables.insert( + "PATH_INFO".to_string(), + match &path_info { + Some(path_info) => format!("/{}", path_info), + 
None => "".to_string(), + }, + ); + environment_variables.insert( + "PATH_TRANSLATED".to_string(), + match &path_info { + Some(path_info) => { + let mut path_translated = execute_pathbuf.clone(); + path_translated.push(path_info); + path_translated.to_string_lossy().to_string() + } + None => "".to_string(), + }, + ); + environment_variables.insert( + "REQUEST_METHOD".to_string(), + hyper_request.method().to_string(), + ); + environment_variables.insert("GATEWAY_INTERFACE".to_string(), "CGI/1.1".to_string()); + environment_variables.insert( + "REQUEST_URI".to_string(), + format!( + "{}{}", + original_request_uri.path(), + match original_request_uri.query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ), + ); + + environment_variables.insert( + "REMOTE_PORT".to_string(), + socket_data.remote_addr.port().to_string(), + ); + environment_variables.insert( + "REMOTE_ADDR".to_string(), + socket_data.remote_addr.ip().to_canonical().to_string(), + ); + + environment_variables.insert( + "SCRIPT_FILENAME".to_string(), + execute_pathbuf.to_string_lossy().to_string(), + ); + if let Ok(script_path) = execute_pathbuf.as_path().strip_prefix(wwwroot) { + environment_variables.insert( + "SCRIPT_NAME".to_string(), + format!( + "/{}", + match cfg!(windows) { + true => script_path.to_string_lossy().to_string().replace("\\", "/"), + false => script_path.to_string_lossy().to_string(), + } + ), + ); + } + + if socket_data.encrypted { + environment_variables.insert("HTTPS".to_string(), "ON".to_string()); + } + + let mut content_length_set = false; + for (header_name, header_value) in hyper_request.headers().iter() { + let env_header_name = match *header_name { + header::CONTENT_LENGTH => { + content_length_set = true; + "CONTENT_LENGTH".to_string() + } + header::CONTENT_TYPE => "CONTENT_TYPE".to_string(), + _ => { + let mut result = String::new(); + + result.push_str("HTTP_"); + + for c in header_name.as_str().to_uppercase().chars() { + if c.is_alphanumeric() 
{ + result.push(c); + } else { + result.push('_'); + } + } + + result + } + }; + if environment_variables.contains_key(&env_header_name) { + let value = environment_variables.get_mut(&env_header_name); + if let Some(value) = value { + if env_header_name == "HTTP_COOKIE" { + value.push_str("; "); + } else { + // See https://stackoverflow.com/a/1801191 + value.push_str(", "); + } + value.push_str(String::from_utf8_lossy(header_value.as_bytes()).as_ref()); + } else { + environment_variables.insert( + env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } else { + environment_variables.insert( + env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } + + if !content_length_set { + environment_variables.insert("CONTENT_LENGTH".to_string(), "0".to_string()); + } + + let (hyper_request, _, _) = request.into_parts(); + + execute_fastcgi( + hyper_request, + error_logger, + fastcgi_to, + environment_variables, + ) + .await +} + +async fn execute_fastcgi( + hyper_request: HyperRequest, + error_logger: &ErrorLogger, + fastcgi_to: &str, + mut environment_variables: LinkedHashMap, +) -> Result> { + let (_, body) = hyper_request.into_parts(); + + // Insert other environment variables + for (key, value) in env::vars_os() { + let key_string = key.to_string_lossy().to_string(); + let value_string = value.to_string_lossy().to_string(); + environment_variables + .entry(key_string) + .or_insert(value_string); + } + + let fastcgi_to_fixed = if let Some(stripped) = fastcgi_to.strip_prefix("unix:///") { + // hyper::Uri fails to parse a string if there is an empty authority, so add an "ignore" authority to Unix socket URLs + &format!("unix://ignore/{}", stripped) + } else { + fastcgi_to + }; + + let fastcgi_to_url = fastcgi_to_fixed.parse::()?; + let scheme_str = fastcgi_to_url.scheme_str(); + + let (socket_reader, mut socket_writer) = match scheme_str { + Some("tcp") => { + let host = match fastcgi_to_url.host() 
{ + Some(host) => host, + None => Err(anyhow::anyhow!("The FastCGI URL doesn't include the host"))?, + }; + + let port = match fastcgi_to_url.port_u16() { + Some(port) => port, + None => Err(anyhow::anyhow!("The FastCGI URL doesn't include the port"))?, + }; + + let addr = format!("{}:{}", host, port); + + match connect_tcp(&addr).await { + Ok(data) => data, + Err(err) => match err.kind() { + tokio::io::ErrorKind::ConnectionRefused + | tokio::io::ErrorKind::NotFound + | tokio::io::ErrorKind::HostUnreachable => { + error_logger + .log(&format!("Service unavailable: {}", err)) + .await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::SERVICE_UNAVAILABLE) + .build(), + ); + } + _ => Err(err)?, + }, + } + } + Some("unix") => { + let path = fastcgi_to_url.path(); + match connect_unix(path).await { + Ok(data) => data, + Err(err) => match err.kind() { + tokio::io::ErrorKind::ConnectionRefused + | tokio::io::ErrorKind::NotFound + | tokio::io::ErrorKind::HostUnreachable => { + error_logger + .log(&format!("Service unavailable: {}", err)) + .await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::SERVICE_UNAVAILABLE) + .build(), + ); + } + _ => Err(err)?, + }, + } + } + _ => Err(anyhow::anyhow!( + "Only tcp:// and unix:// FastCGI URLs are supported." 
+ ))?, + }; + + // Construct and send BEGIN_REQUEST record + // Use the responder role and don't use keep-alive + let begin_request_packet = construct_fastcgi_record(1, 1, &[0, 1, 0, 0, 0, 0, 0, 0]); + socket_writer.write_all(&begin_request_packet).await?; + + // Construct and send PARAMS records + let mut environment_variables_to_wrap = Vec::new(); + for (key, value) in environment_variables.iter() { + let mut environment_variable = + construct_fastcgi_name_value_pair(key.as_bytes(), value.as_bytes()); + environment_variables_to_wrap.append(&mut environment_variable); + } + if !environment_variables_to_wrap.is_empty() { + let mut offset = 0; + while offset < environment_variables_to_wrap.len() { + let chunk_size = std::cmp::min(65536, environment_variables_to_wrap.len() - offset); + let chunk = &environment_variables_to_wrap[offset..offset + chunk_size]; + + // Record type 4 means PARAMS + let params_packet = construct_fastcgi_record(4, 1, chunk); + socket_writer.write_all(¶ms_packet).await?; + + offset += chunk_size; + } + } + + let params_packet_terminating = construct_fastcgi_record(4, 1, &[]); + socket_writer.write_all(¶ms_packet_terminating).await?; + + let cgi_stdin_reader = StreamReader::new(body.into_data_stream().map_err(std::io::Error::other)); + + // Emulated standard input, standard output, and standard error + type EitherStream = Either, Result>; + let stdin = SinkWriter::new(FramedWrite::new(socket_writer, FcgiEncoder::new())); + let stdout_and_stderr = FramedRead::new(socket_reader, FcgiDecoder::new()); + let (stdout_stream, stderr_stream) = stdout_and_stderr.split_by_map(|item| match item { + Ok(FcgiDecodedData::Stdout(bytes)) => EitherStream::Left(Ok(bytes)), + Ok(FcgiDecodedData::Stderr(bytes)) => EitherStream::Right(Ok(bytes)), + Err(err) => EitherStream::Left(Err(err)), + }); + let stdout = StreamReader::new(stdout_stream); + let stderr = StreamReader::new(stderr_stream); + + let mut cgi_response = CgiResponse::new(stdout); + + let 
stdin_copy_future = Copier::with_zero_packet_writing(cgi_stdin_reader, stdin).copy(); + let mut stdin_copy_future_pinned = Box::pin(stdin_copy_future); + + let stderr_read_future = ReadToEndFuture::new(stderr); + let mut stderr_read_future_pinned = Box::pin(stderr_read_future); + + let mut headers = [EMPTY_HEADER; 128]; + + let mut early_stdin_copied = false; + + // Needed to wrap this in another scope to prevent errors with multiple mutable borrows. + { + let mut head_obtained = false; + let stdout_parse_future = cgi_response.get_head(); + tokio::pin!(stdout_parse_future); + + // Cannot use a loop with tokio::select, since stdin_copy_future_pinned being constantly ready will make the web server stop responding to HTTP requests + tokio::select! { + biased; + + result = &mut stdin_copy_future_pinned => { + early_stdin_copied = true; + result?; + }, + obtained_head = &mut stdout_parse_future => { + let obtained_head = obtained_head?; + if !obtained_head.is_empty() { + httparse::parse_headers(obtained_head, &mut headers)?; + } + head_obtained = true; + }, + result = &mut stderr_read_future_pinned => { + let stderr_vec = result?; + let stderr_string = String::from_utf8_lossy(stderr_vec.as_slice()).to_string(); + if !stderr_string.is_empty() { + error_logger + .log(&format!("There were FastCGI errors: {}", stderr_string)) + .await; + } + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .build(), + ); + }, + } + + if !head_obtained { + // Kept it same as in the tokio::select macro + tokio::select! 
{ + biased; + + result = &mut stderr_read_future_pinned => { + let stderr_vec = result?; + let stderr_string = String::from_utf8_lossy(stderr_vec.as_slice()).to_string(); + if !stderr_string.is_empty() { + error_logger + .log(&format!("There were FastCGI errors: {}", stderr_string)) + .await; + } + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .build(), + ); + }, + obtained_head = &mut stdout_parse_future => { + let obtained_head = obtained_head?; + if !obtained_head.is_empty() { + httparse::parse_headers(obtained_head, &mut headers)?; + } + } + } + } + } + + let mut response_builder = Response::builder(); + let mut status_code = 200; + for header in headers { + if header == EMPTY_HEADER { + break; + } + let mut is_status_header = false; + match &header.name.to_lowercase() as &str { + "location" => { + if !(300..=399).contains(&status_code) { + status_code = 302; + } + } + "status" => { + is_status_header = true; + let header_value_cow = String::from_utf8_lossy(header.value); + let mut split_status = header_value_cow.split(" "); + let first_part = split_status.next(); + if let Some(first_part) = first_part { + if first_part.starts_with("HTTP/") { + let second_part = split_status.next(); + if let Some(second_part) = second_part { + if let Ok(parsed_status_code) = second_part.parse::() { + status_code = parsed_status_code; + } + } + } else if let Ok(parsed_status_code) = first_part.parse::() { + status_code = parsed_status_code; + } + } + } + _ => (), + } + if !is_status_header { + response_builder = response_builder.header(header.name, header.value); + } + } + + response_builder = response_builder.status(status_code); + + let reader_stream = ReaderStream::new(cgi_response); + let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data)); + let boxed_body = stream_body.boxed(); + + let response = response_builder.body(boxed_body)?; + + let error_logger = error_logger.clone(); + + Ok( + 
ResponseData::builder_without_request() + .response(response) + .parallel_fn(async move { + let mut stdin_copied = early_stdin_copied; + + if !stdin_copied { + tokio::select! { + biased; + + _ = &mut stdin_copy_future_pinned => { + stdin_copied = true; + }, + result = &mut stderr_read_future_pinned => { + let stderr_vec = result.unwrap_or(vec![]); + let stderr_string = String::from_utf8_lossy(stderr_vec.as_slice()).to_string(); + if !stderr_string.is_empty() { + error_logger + .log(&format!("There were FastCGI errors: {}", stderr_string)) + .await; + } + }, + } + } + + if stdin_copied { + let stderr_vec = stderr_read_future_pinned.await.unwrap_or(vec![]); + let stderr_string = String::from_utf8_lossy(stderr_vec.as_slice()).to_string(); + if !stderr_string.is_empty() { + error_logger + .log(&format!("There were FastCGI errors: {}", stderr_string)) + .await; + } + } else { + stdin_copy_future_pinned.await.unwrap_or_default(); + } + }) + .build(), + ) +} + +async fn connect_tcp( + addr: &str, +) -> Result< + ( + Box, + Box, + ), + tokio::io::Error, +> { + let socket = TcpStream::connect(addr).await?; + socket.set_nodelay(true)?; + + let (socket_reader_set, socket_writer_set) = tokio::io::split(socket); + Ok((Box::new(socket_reader_set), Box::new(socket_writer_set))) +} + +#[allow(dead_code)] +#[cfg(unix)] +async fn connect_unix( + path: &str, +) -> Result< + ( + Box, + Box, + ), + tokio::io::Error, +> { + use tokio::net::UnixStream; + + let socket = UnixStream::connect(path).await?; + + let (socket_reader_set, socket_writer_set) = tokio::io::split(socket); + Ok((Box::new(socket_reader_set), Box::new(socket_writer_set))) +} + +#[allow(dead_code)] +#[cfg(not(unix))] +async fn connect_unix( + _path: &str, +) -> Result< + ( + Box, + Box, + ), + tokio::io::Error, +> { + Err(tokio::io::Error::new( + tokio::io::ErrorKind::Unsupported, + "Unix sockets are not supported on non-Unix platforms.", + )) +} diff --git a/ferron/src/optional_modules/fproxy.rs 
b/ferron/src/optional_modules/fproxy.rs new file mode 100644 index 0000000000000000000000000000000000000000..88039fbe831fa407ec4a74e137c16fed8e98a12a --- /dev/null +++ b/ferron/src/optional_modules/fproxy.rs @@ -0,0 +1,301 @@ +use std::error::Error; +use std::str::FromStr; + +use crate::ferron_common::{ + ErrorLogger, HyperUpgraded, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperResponse, WithRuntime}; +use async_trait::async_trait; +use http_body_util::combinators::BoxBody; +use http_body_util::BodyExt; +use hyper::body::Bytes; +use hyper::{header, Request, StatusCode, Uri}; +use hyper_tungstenite::HyperWebsocket; +use hyper_util::rt::TokioIo; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::TcpStream; +use tokio::runtime::Handle; + +pub fn server_module_init( + _config: &ServerConfig, +) -> Result, Box> { + Ok(Box::new(ForwardProxyModule::new())) +} + +struct ForwardProxyModule; + +impl ForwardProxyModule { + fn new() -> Self { + Self + } +} + +impl ServerModule for ForwardProxyModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(ForwardProxyModuleHandlers { handle }) + } +} + +struct ForwardProxyModuleHandlers { + handle: Handle, +} + +#[async_trait] +impl ServerModuleHandlers for ForwardProxyModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + // Code taken from reverse proxy module + let (hyper_request, _auth_user, _original_url) = request.into_parts(); + let (mut hyper_request_parts, request_body) = hyper_request.into_parts(); + + match 
hyper_request_parts.uri.scheme_str() { + Some("http") | None => (), + _ => { + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let host = match hyper_request_parts.uri.host() { + Some(host) => host, + None => { + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let port = hyper_request_parts.uri.port_u16().unwrap_or(80); + + let addr = format!("{}:{}", host, port); + let stream = match TcpStream::connect(addr).await { + Ok(stream) => stream, + Err(err) => { + match err.kind() { + tokio::io::ErrorKind::ConnectionRefused + | tokio::io::ErrorKind::NotFound + | tokio::io::ErrorKind::HostUnreachable => { + error_logger + .log(&format!("Service unavailable: {}", err)) + .await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::SERVICE_UNAVAILABLE) + .build(), + ); + } + tokio::io::ErrorKind::TimedOut => { + error_logger.log(&format!("Gateway timeout: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::GATEWAY_TIMEOUT) + .build(), + ); + } + _ => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + } + }; + + match stream.set_nodelay(true) { + Ok(_) => (), + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + let hyper_request_path = hyper_request_parts.uri.path(); + + hyper_request_parts.uri = Uri::from_str(&format!( + "{}{}", + hyper_request_path, + match hyper_request_parts.uri.query() { + Some(query) => format!("?{}", query), + None => "".to_string(), + } + ))?; + + // Connection header to disable HTTP/1.1 keep-alive + hyper_request_parts + .headers + .insert(header::CONNECTION, "close".parse()?); + 
+ let proxy_request = Request::from_parts(hyper_request_parts, request_body); + + http_proxy(stream, proxy_request, error_logger).await + }) + .await + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + upgraded_request: HyperUpgraded, + connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result<(), Box> { + WithRuntime::new(self.handle.clone(), async move { + let mut stream = match TcpStream::connect(connect_address).await { + Ok(stream) => stream, + Err(err) => { + error_logger + .log(&format!("Cannot connect to the remote server: {}", err)) + .await; + return Ok(()); + } + }; + match stream.set_nodelay(true) { + Ok(_) => (), + Err(err) => { + error_logger + .log(&format!( + "Cannot disable Nagle algorithm when connecting to the remote server: {}", + err + )) + .await; + return Ok(()); + } + }; + + let mut upgraded = TokioIo::new(upgraded_request); + + tokio::io::copy_bidirectional(&mut upgraded, &mut stream) + .await + .unwrap_or_default(); + + Ok(()) + }) + .await + } + + fn does_connect_proxy_requests(&mut self) -> bool { + true + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} + +async fn http_proxy( + stream: impl AsyncRead + AsyncWrite + Send + Unpin + 'static, + proxy_request: Request>, + error_logger: &ErrorLogger, +) -> Result> { + let io = TokioIo::new(stream); + + let (mut sender, conn) = match hyper::client::conn::http1::handshake(io).await { + 
Ok(data) => data, + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + let send_request = sender.send_request(proxy_request); + + let mut pinned_conn = Box::pin(conn); + tokio::pin!(send_request); + + let response; + + loop { + tokio::select! { + biased; + + proxy_response = &mut send_request => { + let proxy_response = match proxy_response { + Ok(response) => response, + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok(ResponseData::builder_without_request().status(StatusCode::BAD_GATEWAY).build()); + } + }; + + response = ResponseData::builder_without_request() + .response(proxy_response.map(|b| { + b.map_err(|e| std::io::Error::other(e.to_string())) + .boxed() + })) + .parallel_fn(async move { + pinned_conn.await.unwrap_or_default(); + }) + .build(); + + break; + }, + state = &mut pinned_conn => { + if state.is_err() { + error_logger.log("Bad gateway: incomplete response").await; + return Ok(ResponseData::builder_without_request().status(StatusCode::BAD_GATEWAY).build()); + } + }, + }; + } + + Ok(response) +} diff --git a/ferron/src/optional_modules/rproxy.rs b/ferron/src/optional_modules/rproxy.rs new file mode 100644 index 0000000000000000000000000000000000000000..f839489492d0ed33076cc4e626081a4adea28502 --- /dev/null +++ b/ferron/src/optional_modules/rproxy.rs @@ -0,0 +1,803 @@ +use std::collections::HashMap; +use std::error::Error; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use crate::ferron_common::{ + ErrorLogger, HyperUpgraded, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperResponse, WithRuntime}; +use async_trait::async_trait; +use futures_util::{SinkExt, StreamExt}; +use http::uri::{PathAndQuery, Scheme}; +use http_body_util::combinators::BoxBody; +use 
http_body_util::BodyExt; +use hyper::body::Bytes; +use hyper::client::conn::http1::SendRequest; +use hyper::{header, Request, StatusCode, Uri}; +use hyper_tungstenite::HyperWebsocket; +use hyper_util::rt::TokioIo; +use rustls::pki_types::ServerName; +use rustls::RootCertStore; +use rustls_native_certs::load_native_certs; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::TcpStream; +use tokio::runtime::Handle; +use tokio::sync::RwLock; +use tokio_rustls::TlsConnector; +use tokio_tungstenite::Connector; + +use crate::ferron_util::no_server_verifier::NoServerVerifier; +use crate::ferron_util::ttl_cache::TtlCache; + +const DEFAULT_CONCURRENT_CONNECTIONS_PER_HOST: u32 = 32; + +pub fn server_module_init( + config: &ServerConfig, +) -> Result, Box> { + let mut roots: RootCertStore = RootCertStore::empty(); + let certs_result = load_native_certs(); + if !certs_result.errors.is_empty() { + Err(anyhow::anyhow!(format!( + "Couldn't load the native certificate store: {}", + certs_result.errors[0] + )))? 
+ } + let certs = certs_result.certs; + + for cert in certs { + match roots.add(cert) { + Ok(_) => (), + Err(err) => Err(anyhow::anyhow!(format!( + "Couldn't add a certificate to the certificate store: {}", + err + )))?, + } + } + + let mut connections_vec = Vec::new(); + for _ in 0..DEFAULT_CONCURRENT_CONNECTIONS_PER_HOST { + connections_vec.push(RwLock::new(HashMap::new())); + } + Ok(Box::new(ReverseProxyModule::new( + Arc::new(roots), + Arc::new(connections_vec), + Arc::new(RwLock::new(TtlCache::new(Duration::from_millis( + config["global"]["loadBalancerHealthCheckWindow"] + .as_i64() + .unwrap_or(5000) as u64, + )))), + ))) +} + +#[allow(clippy::type_complexity)] +struct ReverseProxyModule { + roots: Arc, + connections: Arc>>>>>, + failed_backends: Arc>>, +} + +impl ReverseProxyModule { + #[allow(clippy::type_complexity)] + fn new( + roots: Arc, + connections: Arc>>>>>, + failed_backends: Arc>>, + ) -> Self { + Self { + roots, + connections, + failed_backends, + } + } +} + +impl ServerModule for ReverseProxyModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(ReverseProxyModuleHandlers { + roots: self.roots.clone(), + connections: self.connections.clone(), + failed_backends: self.failed_backends.clone(), + handle, + }) + } +} + +#[allow(clippy::type_complexity)] +struct ReverseProxyModuleHandlers { + handle: Handle, + roots: Arc, + connections: Arc>>>>>, + failed_backends: Arc>>, +} + +#[async_trait] +impl ServerModuleHandlers for ReverseProxyModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let enable_health_check = config["enableLoadBalancerHealthCheck"] + .as_bool() + .unwrap_or(false); + let health_check_max_fails = config["loadBalancerHealthCheckMaximumFails"] + .as_i64() + .unwrap_or(3) as u64; + let disable_certificate_verification = 
config["disableProxyCertificateVerification"] + .as_bool() + .unwrap_or(false); + if let Some(proxy_to) = determine_proxy_to( + config, + socket_data.encrypted, + &self.failed_backends, + enable_health_check, + health_check_max_fails, + ) + .await + { + let (hyper_request, _auth_user, _original_url) = request.into_parts(); + let (mut hyper_request_parts, request_body) = hyper_request.into_parts(); + + let proxy_request_url = proxy_to.parse::()?; + let scheme_str = proxy_request_url.scheme_str(); + let mut encrypted = false; + + match scheme_str { + Some("http") => { + encrypted = false; + } + Some("https") => { + encrypted = true; + } + _ => Err(anyhow::anyhow!( + "Only HTTP and HTTPS reverse proxy URLs are supported." + ))?, + }; + + let host = match proxy_request_url.host() { + Some(host) => host, + None => Err(anyhow::anyhow!( + "The reverse proxy URL doesn't include the host" + ))?, + }; + + let port = proxy_request_url.port_u16().unwrap_or(match scheme_str { + Some("http") => 80, + Some("https") => 443, + _ => 80, + }); + + let addr = format!("{}:{}", host, port); + let authority = proxy_request_url.authority().cloned(); + + let hyper_request_path = hyper_request_parts.uri.path(); + + let path = match hyper_request_path.as_bytes().first() { + Some(b'/') => { + let mut proxy_request_path = proxy_request_url.path(); + while proxy_request_path.as_bytes().last().copied() == Some(b'/') { + proxy_request_path = &proxy_request_path[..(proxy_request_path.len() - 1)]; + } + format!("{}{}", proxy_request_path, hyper_request_path) + } + _ => hyper_request_path.to_string(), + }; + + hyper_request_parts.uri = Uri::from_str(&format!( + "{}{}", + path, + match hyper_request_parts.uri.query() { + Some(query) => format!("?{}", query), + None => "".to_string(), + } + ))?; + + let original_host = hyper_request_parts.headers.get(header::HOST).cloned(); + + // Host header for host identification + match authority { + Some(authority) => { + hyper_request_parts + .headers + 
.insert(header::HOST, authority.to_string().parse()?); + } + None => { + hyper_request_parts.headers.remove(header::HOST); + } + } + + // Connection header to enable HTTP/1.1 keep-alive + hyper_request_parts + .headers + .insert(header::CONNECTION, "keep-alive".parse()?); + + // X-Forwarded-* headers to send the client's data to a server that's behind the reverse proxy + hyper_request_parts.headers.insert( + "x-forwarded-for", + socket_data + .remote_addr + .ip() + .to_canonical() + .to_string() + .parse()?, + ); + + if socket_data.encrypted { + hyper_request_parts + .headers + .insert("x-forwarded-proto", "https".parse()?); + } else { + hyper_request_parts + .headers + .insert("x-forwarded-proto", "http".parse()?); + } + + if let Some(original_host) = original_host { + hyper_request_parts + .headers + .insert("x-forwarded-host", original_host); + } + + let proxy_request = Request::from_parts(hyper_request_parts, request_body); + + let connections = &self.connections[rand::random_range(..self.connections.len())]; + + let rwlock_read = connections.read().await; + let sender_read_option = rwlock_read.get(&addr); + + if let Some(sender_read) = sender_read_option { + if !sender_read.is_closed() { + drop(rwlock_read); + let mut rwlock_write = connections.write().await; + let sender_option = rwlock_write.get_mut(&addr); + + if let Some(sender) = sender_option { + if !sender.is_closed() { + let result = http_proxy_kept_alive(sender, proxy_request, error_logger).await; + drop(rwlock_write); + return result; + } else { + drop(rwlock_write); + } + } else { + drop(rwlock_write); + } + } else { + drop(rwlock_read); + } + } else { + drop(rwlock_read); + } + + let stream = match TcpStream::connect(&addr).await { + Ok(stream) => stream, + Err(err) => { + if enable_health_check { + let mut failed_backends_write = self.failed_backends.write().await; + let proxy_to = proxy_to.clone(); + let failed_attempts = failed_backends_write.get(&proxy_to); + 
failed_backends_write.insert(proxy_to, failed_attempts.map_or(1, |x| x + 1)); + } + match err.kind() { + tokio::io::ErrorKind::ConnectionRefused + | tokio::io::ErrorKind::NotFound + | tokio::io::ErrorKind::HostUnreachable => { + error_logger + .log(&format!("Service unavailable: {}", err)) + .await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::SERVICE_UNAVAILABLE) + .build(), + ); + } + tokio::io::ErrorKind::TimedOut => { + error_logger.log(&format!("Gateway timeout: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::GATEWAY_TIMEOUT) + .build(), + ); + } + _ => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + } + }; + + match stream.set_nodelay(true) { + Ok(_) => (), + Err(err) => { + if enable_health_check { + let mut failed_backends_write = self.failed_backends.write().await; + let proxy_to = proxy_to.clone(); + let failed_attempts = failed_backends_write.get(&proxy_to); + failed_backends_write.insert(proxy_to, failed_attempts.map_or(1, |x| x + 1)); + } + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + let failed_backends_option_borrowed = if enable_health_check { + Some(&*self.failed_backends) + } else { + None + }; + + if !encrypted { + http_proxy( + connections, + addr, + stream, + proxy_request, + error_logger, + proxy_to, + failed_backends_option_borrowed, + ) + .await + } else { + let tls_client_config = (if disable_certificate_verification { + rustls::ClientConfig::builder() + .dangerous() + .with_custom_certificate_verifier(Arc::new(NoServerVerifier::new())) + } else { + rustls::ClientConfig::builder().with_root_certificates(self.roots.clone()) + }) + .with_no_client_auth(); + let connector = 
TlsConnector::from(Arc::new(tls_client_config)); + let domain = ServerName::try_from(host)?.to_owned(); + + let tls_stream = match connector.connect(domain, stream).await { + Ok(stream) => stream, + Err(err) => { + if enable_health_check { + let mut failed_backends_write = self.failed_backends.write().await; + let proxy_to = proxy_to.clone(); + let failed_attempts = failed_backends_write.get(&proxy_to); + failed_backends_write.insert(proxy_to, failed_attempts.map_or(1, |x| x + 1)); + } + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + http_proxy( + connections, + addr, + tls_stream, + proxy_request, + error_logger, + proxy_to, + failed_backends_option_borrowed, + ) + .await + } + } else { + Ok(ResponseData::builder(request).build()) + } + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + websocket: HyperWebsocket, + uri: &hyper::Uri, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result<(), Box> { + WithRuntime::new(self.handle.clone(), async move { + let enable_health_check = 
config["enableLoadBalancerHealthCheck"] + .as_bool() + .unwrap_or(false); + let health_check_max_fails = config["loadBalancerHealthCheckMaximumFails"] + .as_i64() + .unwrap_or(3) as u64; + + let disable_certificate_verification = config["disableProxyCertificateVerification"] + .as_bool() + .unwrap_or(false); + if let Some(proxy_to) = determine_proxy_to( + config, + socket_data.encrypted, + &self.failed_backends, + enable_health_check, + health_check_max_fails, + ) + .await + { + let proxy_request_url = proxy_to.parse::()?; + let scheme_str = proxy_request_url.scheme_str(); + let mut encrypted = false; + + match scheme_str { + Some("http") => { + encrypted = false; + } + Some("https") => { + encrypted = true; + } + _ => Err(anyhow::anyhow!( + "Only HTTP and HTTPS reverse proxy URLs are supported." + ))?, + }; + + let request_path = uri.path(); + + let path = match request_path.as_bytes().first() { + Some(b'/') => { + let mut proxy_request_path = proxy_request_url.path(); + while proxy_request_path.as_bytes().last().copied() == Some(b'/') { + proxy_request_path = &proxy_request_path[..(proxy_request_path.len() - 1)]; + } + format!("{}{}", proxy_request_path, request_path) + } + _ => request_path.to_string(), + }; + + let mut proxy_request_url_parts = proxy_request_url.into_parts(); + proxy_request_url_parts.scheme = if encrypted { + Some(Scheme::from_str("wss")?) + } else { + Some(Scheme::from_str("ws")?) 
+ }; + match proxy_request_url_parts.path_and_query { + Some(path_and_query) => { + let path_and_query_string = match path_and_query.query() { + Some(query) => { + format!("{}?{}", path, query) + } + None => path, + }; + proxy_request_url_parts.path_and_query = + Some(PathAndQuery::from_str(&path_and_query_string)?); + } + None => { + proxy_request_url_parts.path_and_query = Some(PathAndQuery::from_str(&path)?); + } + }; + + let proxy_request_url = hyper::Uri::from_parts(proxy_request_url_parts)?; + + let connector = if !encrypted { + Connector::Plain + } else { + Connector::Rustls(Arc::new( + (if disable_certificate_verification { + rustls::ClientConfig::builder() + .dangerous() + .with_custom_certificate_verifier(Arc::new(NoServerVerifier::new())) + } else { + rustls::ClientConfig::builder().with_root_certificates(self.roots.clone()) + }) + .with_no_client_auth(), + )) + }; + + let client_bi_stream = websocket.await?; + + let (proxy_bi_stream, _) = match tokio_tungstenite::connect_async_tls_with_config( + proxy_request_url, + None, + true, + Some(connector), + ) + .await + { + Ok(data) => data, + Err(err) => { + error_logger + .log(&format!("Cannot connect to WebSocket server: {}", err)) + .await; + return Ok(()); + } + }; + + let (mut client_sink, mut client_stream) = client_bi_stream.split(); + let (mut proxy_sink, mut proxy_stream) = proxy_bi_stream.split(); + + let client_to_proxy = async { + while let Some(Ok(value)) = client_stream.next().await { + if proxy_sink.send(value).await.is_err() { + break; + } + } + }; + + let proxy_to_client = async { + while let Some(Ok(value)) = proxy_stream.next().await { + if client_sink.send(value).await.is_err() { + break; + } + } + }; + + tokio::pin!(client_to_proxy); + tokio::pin!(proxy_to_client); + + let client_to_proxy_first; + tokio::select! 
{ + _ = &mut client_to_proxy => { + client_to_proxy_first = true; + } + _ = &mut proxy_to_client => { + client_to_proxy_first = false; + } + } + + if client_to_proxy_first { + proxy_to_client.await; + } else { + client_to_proxy.await; + } + } + + Ok(()) + }) + .await + } + + fn does_websocket_requests(&mut self, config: &ServerConfig, socket_data: &SocketData) -> bool { + if socket_data.encrypted { + let secure_proxy_to = &config["secureProxyTo"]; + if secure_proxy_to.as_vec().is_some() || secure_proxy_to.as_str().is_some() { + return true; + } + } + + let proxy_to = &config["proxyTo"]; + proxy_to.as_vec().is_some() || proxy_to.as_str().is_some() + } +} + +async fn determine_proxy_to( + config: &ServerConfig, + encrypted: bool, + failed_backends: &RwLock>, + enable_health_check: bool, + health_check_max_fails: u64, +) -> Option { + let mut proxy_to = None; + // When the array is supplied with non-string values, the reverse proxy may have undesirable behavior + // The "proxyTo" and "secureProxyTo" are validated though. 
+ + if encrypted { + let secure_proxy_to_yaml = &config["secureProxyTo"]; + if let Some(secure_proxy_to_vector) = secure_proxy_to_yaml.as_vec() { + if enable_health_check { + let mut secure_proxy_to_vector = secure_proxy_to_vector.clone(); + loop { + if !secure_proxy_to_vector.is_empty() { + let index = rand::random_range(..secure_proxy_to_vector.len()); + if let Some(secure_proxy_to) = secure_proxy_to_vector[index].as_str() { + proxy_to = Some(secure_proxy_to.to_string()); + let failed_backends_read = failed_backends.read().await; + let failed_backend_fails = + match failed_backends_read.get(&secure_proxy_to.to_string()) { + Some(fails) => fails, + None => break, + }; + if failed_backend_fails > health_check_max_fails { + secure_proxy_to_vector.remove(index); + } else { + break; + } + } + } else { + break; + } + } + } else if !secure_proxy_to_vector.is_empty() { + if let Some(secure_proxy_to) = + secure_proxy_to_vector[rand::random_range(..secure_proxy_to_vector.len())].as_str() + { + proxy_to = Some(secure_proxy_to.to_string()); + } + } + } else if let Some(secure_proxy_to) = secure_proxy_to_yaml.as_str() { + proxy_to = Some(secure_proxy_to.to_string()); + } + } + + if proxy_to.is_none() { + let proxy_to_yaml = &config["proxyTo"]; + if let Some(proxy_to_vector) = proxy_to_yaml.as_vec() { + if enable_health_check { + let mut proxy_to_vector = proxy_to_vector.clone(); + loop { + if !proxy_to_vector.is_empty() { + let index = rand::random_range(..proxy_to_vector.len()); + if let Some(proxy_to_str) = proxy_to_vector[index].as_str() { + proxy_to = Some(proxy_to_str.to_string()); + let failed_backends_read = failed_backends.read().await; + let failed_backend_fails = match failed_backends_read.get(&proxy_to_str.to_string()) { + Some(fails) => fails, + None => break, + }; + if failed_backend_fails > health_check_max_fails { + proxy_to_vector.remove(index); + } else { + break; + } + } + } else { + break; + } + } + } else if !proxy_to_vector.is_empty() { + if let 
Some(proxy_to_str) = + proxy_to_vector[rand::random_range(..proxy_to_vector.len())].as_str() + { + proxy_to = Some(proxy_to_str.to_string()); + } + } + } else if let Some(proxy_to_str) = proxy_to_yaml.as_str() { + proxy_to = Some(proxy_to_str.to_string()); + } + } + + proxy_to +} + +async fn http_proxy( + connections: &RwLock>>>, + connect_addr: String, + stream: impl AsyncRead + AsyncWrite + Send + Unpin + 'static, + proxy_request: Request>, + error_logger: &ErrorLogger, + proxy_to: String, + failed_backends: Option<&tokio::sync::RwLock>>, +) -> Result> { + let io = TokioIo::new(stream); + + let (mut sender, conn) = match hyper::client::conn::http1::handshake(io).await { + Ok(data) => data, + Err(err) => { + if let Some(failed_backends) = failed_backends { + let mut failed_backends_write = failed_backends.write().await; + let failed_attempts = failed_backends_write.get(&proxy_to); + failed_backends_write.insert(proxy_to, failed_attempts.map_or(1, |x| x + 1)); + } + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + let send_request = sender.send_request(proxy_request); + + let mut pinned_conn = Box::pin(conn); + tokio::pin!(send_request); + + let response; + + loop { + tokio::select! 
{ + biased; + + proxy_response = &mut send_request => { + let proxy_response = match proxy_response { + Ok(response) => response, + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok(ResponseData::builder_without_request().status(StatusCode::BAD_GATEWAY).build()); + } + }; + + response = ResponseData::builder_without_request() + .response(proxy_response.map(|b| { + b.map_err(|e| std::io::Error::other(e.to_string())) + .boxed() + })) + .parallel_fn(async move { + pinned_conn.await.unwrap_or_default(); + }) + .build(); + + break; + }, + state = &mut pinned_conn => { + if state.is_err() { + error_logger.log("Bad gateway: incomplete response").await; + return Ok(ResponseData::builder_without_request().status(StatusCode::BAD_GATEWAY).build()); + } + }, + }; + } + + if !sender.is_closed() { + let mut rwlock_write = connections.write().await; + rwlock_write.insert(connect_addr, sender); + drop(rwlock_write); + } + + Ok(response) +} + +async fn http_proxy_kept_alive( + sender: &mut SendRequest>, + proxy_request: Request>, + error_logger: &ErrorLogger, +) -> Result> { + let proxy_response = match sender.send_request(proxy_request).await { + Ok(response) => response, + Err(err) => { + error_logger.log(&format!("Bad gateway: {}", err)).await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::BAD_GATEWAY) + .build(), + ); + } + }; + + let response = ResponseData::builder_without_request() + .response(proxy_response.map(|b| b.map_err(|e| std::io::Error::other(e.to_string())).boxed())) + .build(); + + Ok(response) +} diff --git a/ferron/src/optional_modules/scgi.rs b/ferron/src/optional_modules/scgi.rs new file mode 100644 index 0000000000000000000000000000000000000000..532205e63be5a5a90f29561d1af006f8368f589a --- /dev/null +++ b/ferron/src/optional_modules/scgi.rs @@ -0,0 +1,673 @@ +// SCGI handler code inspired by SVR.JS's OrangeCircle mod, translated from JavaScript to Rust. 
+// Based on the "cgi" module +use std::env; +use std::error::Error; +use std::path::{Path, PathBuf}; + +use crate::ferron_common::{ + ErrorLogger, HyperRequest, HyperResponse, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperUpgraded, WithRuntime}; +use async_trait::async_trait; +use futures_util::TryStreamExt; +use hashlink::LinkedHashMap; +use http_body_util::{BodyExt, StreamBody}; +use httparse::EMPTY_HEADER; +use hyper::body::Frame; +use hyper::{header, Response, StatusCode}; +use hyper_tungstenite::HyperWebsocket; +use tokio::fs; +use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use tokio::net::TcpStream; +use tokio::runtime::Handle; +use tokio_util::io::{ReaderStream, StreamReader}; + +use crate::ferron_res::server_software::SERVER_SOFTWARE; +use crate::ferron_util::cgi_response::CgiResponse; +use crate::ferron_util::copy_move::Copier; + +pub fn server_module_init( + _config: &ServerConfig, +) -> Result, Box> { + Ok(Box::new(ScgiModule::new())) +} + +struct ScgiModule; + +impl ScgiModule { + fn new() -> Self { + Self + } +} + +impl ServerModule for ScgiModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(ScgiModuleHandlers { handle }) + } +} +struct ScgiModuleHandlers { + handle: Handle, +} + +#[async_trait] +impl ServerModuleHandlers for ScgiModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let mut scgi_to = "tcp://localhost:4000/"; + let scgi_to_yaml = &config["scgiTo"]; + if let Some(scgi_to_obtained) = scgi_to_yaml.as_str() { + scgi_to = scgi_to_obtained; + } + + let mut scgi_path = None; + if let Some(scgi_path_obtained) = config["scgiPath"].as_str() { + scgi_path = Some(scgi_path_obtained.to_string()); + } + + let hyper_request = request.get_hyper_request(); + + let 
request_path = hyper_request.uri().path(); + let mut request_path_bytes = request_path.bytes(); + if request_path_bytes.len() < 1 || request_path_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + if let Some(scgi_path) = scgi_path { + let mut canonical_scgi_path: &str = &scgi_path; + if canonical_scgi_path.bytes().last() == Some(b'/') { + canonical_scgi_path = &canonical_scgi_path[..(canonical_scgi_path.len() - 1)]; + } + + let request_path_with_slashes = match request_path == canonical_scgi_path { + true => format!("{}/", request_path), + false => request_path.to_string(), + }; + if let Some(stripped_request_path) = + request_path_with_slashes.strip_prefix(canonical_scgi_path) + { + let wwwroot_yaml = &config["wwwroot"]; + let wwwroot = wwwroot_yaml.as_str().unwrap_or("/nonexistent"); + + let wwwroot_unknown = PathBuf::from(wwwroot); + let wwwroot_pathbuf = match wwwroot_unknown.as_path().is_absolute() { + true => wwwroot_unknown, + false => match fs::canonicalize(&wwwroot_unknown).await { + Ok(pathbuf) => pathbuf, + Err(_) => wwwroot_unknown, + }, + }; + let wwwroot = wwwroot_pathbuf.as_path(); + + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let joined_pathbuf = wwwroot.join(decoded_relative_path); + let execute_pathbuf = joined_pathbuf; + let execute_path_info = stripped_request_path + .strip_prefix("/") + .map(|s| s.to_string()); + + return execute_scgi_with_environment_variables( + request, + socket_data, + error_logger, + wwwroot, + execute_pathbuf, + execute_path_info, + config["serverAdministratorEmail"].as_str(), + scgi_to, + ) + .await; 
+ } + } + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} + +#[allow(clippy::too_many_arguments)] +async fn execute_scgi_with_environment_variables( + request: RequestData, + socket_data: &SocketData, + error_logger: &ErrorLogger, + wwwroot: &Path, + execute_pathbuf: PathBuf, + path_info: Option, + server_administrator_email: Option<&str>, + scgi_to: &str, +) -> Result> { + let mut environment_variables: LinkedHashMap = LinkedHashMap::new(); + + let hyper_request = request.get_hyper_request(); + let original_request_uri = request.get_original_url().unwrap_or(hyper_request.uri()); + + if let Some(auth_user) = request.get_auth_user() { + if let Some(authorization) = hyper_request.headers().get(header::AUTHORIZATION) { + let authorization_value = String::from_utf8_lossy(authorization.as_bytes()).to_string(); + let mut authorization_value_split = 
authorization_value.split(" "); + if let Some(authorization_type) = authorization_value_split.next() { + environment_variables.insert("AUTH_TYPE".to_string(), authorization_type.to_string()); + } + } + environment_variables.insert("REMOTE_USER".to_string(), auth_user.to_string()); + } + + environment_variables.insert( + "QUERY_STRING".to_string(), + match hyper_request.uri().query() { + Some(query) => query.to_string(), + None => "".to_string(), + }, + ); + + environment_variables.insert("SERVER_SOFTWARE".to_string(), SERVER_SOFTWARE.to_string()); + environment_variables.insert( + "SERVER_PROTOCOL".to_string(), + match hyper_request.version() { + hyper::Version::HTTP_09 => "HTTP/0.9".to_string(), + hyper::Version::HTTP_10 => "HTTP/1.0".to_string(), + hyper::Version::HTTP_11 => "HTTP/1.1".to_string(), + hyper::Version::HTTP_2 => "HTTP/2.0".to_string(), + hyper::Version::HTTP_3 => "HTTP/3.0".to_string(), + _ => "HTTP/Unknown".to_string(), + }, + ); + environment_variables.insert( + "SERVER_PORT".to_string(), + socket_data.local_addr.port().to_string(), + ); + environment_variables.insert( + "SERVER_ADDR".to_string(), + socket_data.local_addr.ip().to_canonical().to_string(), + ); + if let Some(server_administrator_email) = server_administrator_email { + environment_variables.insert( + "SERVER_ADMIN".to_string(), + server_administrator_email.to_string(), + ); + } + if let Some(host) = hyper_request.headers().get(header::HOST) { + environment_variables.insert( + "SERVER_NAME".to_string(), + String::from_utf8_lossy(host.as_bytes()).to_string(), + ); + } + + environment_variables.insert( + "DOCUMENT_ROOT".to_string(), + wwwroot.to_string_lossy().to_string(), + ); + environment_variables.insert( + "PATH_INFO".to_string(), + match &path_info { + Some(path_info) => format!("/{}", path_info), + None => "".to_string(), + }, + ); + environment_variables.insert( + "PATH_TRANSLATED".to_string(), + match &path_info { + Some(path_info) => { + let mut path_translated = 
execute_pathbuf.clone(); + path_translated.push(path_info); + path_translated.to_string_lossy().to_string() + } + None => "".to_string(), + }, + ); + environment_variables.insert( + "REQUEST_METHOD".to_string(), + hyper_request.method().to_string(), + ); + environment_variables.insert("GATEWAY_INTERFACE".to_string(), "CGI/1.1".to_string()); + environment_variables.insert("SCGI".to_string(), "1".to_string()); + environment_variables.insert( + "REQUEST_URI".to_string(), + format!( + "{}{}", + original_request_uri.path(), + match original_request_uri.query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ), + ); + + environment_variables.insert( + "REMOTE_PORT".to_string(), + socket_data.remote_addr.port().to_string(), + ); + environment_variables.insert( + "REMOTE_ADDR".to_string(), + socket_data.remote_addr.ip().to_canonical().to_string(), + ); + + environment_variables.insert( + "SCRIPT_FILENAME".to_string(), + execute_pathbuf.to_string_lossy().to_string(), + ); + if let Ok(script_path) = execute_pathbuf.as_path().strip_prefix(wwwroot) { + environment_variables.insert( + "SCRIPT_NAME".to_string(), + format!( + "/{}", + match cfg!(windows) { + true => script_path.to_string_lossy().to_string().replace("\\", "/"), + false => script_path.to_string_lossy().to_string(), + } + ), + ); + } + + if socket_data.encrypted { + environment_variables.insert("HTTPS".to_string(), "ON".to_string()); + } + + let mut content_length_set = false; + for (header_name, header_value) in hyper_request.headers().iter() { + let env_header_name = match *header_name { + header::CONTENT_LENGTH => { + content_length_set = true; + "CONTENT_LENGTH".to_string() + } + header::CONTENT_TYPE => "CONTENT_TYPE".to_string(), + _ => { + let mut result = String::new(); + + result.push_str("HTTP_"); + + for c in header_name.as_str().to_uppercase().chars() { + if c.is_alphanumeric() { + result.push(c); + } else { + result.push('_'); + } + } + + result + } + }; + if 
environment_variables.contains_key(&env_header_name) { + let value = environment_variables.get_mut(&env_header_name); + if let Some(value) = value { + if env_header_name == "HTTP_COOKIE" { + value.push_str("; "); + } else { + // See https://stackoverflow.com/a/1801191 + value.push_str(", "); + } + value.push_str(String::from_utf8_lossy(header_value.as_bytes()).as_ref()); + } else { + environment_variables.insert( + env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } else { + environment_variables.insert( + env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } + + if !content_length_set { + environment_variables.insert("CONTENT_LENGTH".to_string(), "0".to_string()); + } + + let (hyper_request, _, _) = request.into_parts(); + + execute_scgi(hyper_request, error_logger, scgi_to, environment_variables).await +} + +async fn execute_scgi( + hyper_request: HyperRequest, + error_logger: &ErrorLogger, + scgi_to: &str, + mut environment_variables: LinkedHashMap, +) -> Result> { + let (_, body) = hyper_request.into_parts(); + + // Insert other environment variables + for (key, value) in env::vars_os() { + let key_string = key.to_string_lossy().to_string(); + let value_string = value.to_string_lossy().to_string(); + environment_variables + .entry(key_string) + .or_insert(value_string); + } + + let scgi_to_fixed = if let Some(stripped) = scgi_to.strip_prefix("unix:///") { + // hyper::Uri fails to parse a string if there is an empty authority, so add an "ignore" authority to Unix socket URLs + &format!("unix://ignore/{}", stripped) + } else { + scgi_to + }; + + let scgi_to_url = scgi_to_fixed.parse::()?; + let scheme_str = scgi_to_url.scheme_str(); + + let (socket_reader, mut socket_writer) = match scheme_str { + Some("tcp") => { + let host = match scgi_to_url.host() { + Some(host) => host, + None => Err(anyhow::anyhow!("The SCGI URL doesn't include the host"))?, + }; + + let port = match 
scgi_to_url.port_u16() { + Some(port) => port, + None => Err(anyhow::anyhow!("The SCGI URL doesn't include the port"))?, + }; + + let addr = format!("{}:{}", host, port); + + match connect_tcp(&addr).await { + Ok(data) => data, + Err(err) => match err.kind() { + tokio::io::ErrorKind::ConnectionRefused + | tokio::io::ErrorKind::NotFound + | tokio::io::ErrorKind::HostUnreachable => { + error_logger + .log(&format!("Service unavailable: {}", err)) + .await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::SERVICE_UNAVAILABLE) + .build(), + ); + } + _ => Err(err)?, + }, + } + } + Some("unix") => { + let path = scgi_to_url.path(); + match connect_unix(path).await { + Ok(data) => data, + Err(err) => match err.kind() { + tokio::io::ErrorKind::ConnectionRefused + | tokio::io::ErrorKind::NotFound + | tokio::io::ErrorKind::HostUnreachable => { + error_logger + .log(&format!("Service unavailable: {}", err)) + .await; + return Ok( + ResponseData::builder_without_request() + .status(StatusCode::SERVICE_UNAVAILABLE) + .build(), + ); + } + _ => Err(err)?, + }, + } + } + _ => Err(anyhow::anyhow!( + "Only HTTP and HTTPS reverse proxy URLs are supported." 
+ ))?, + }; + + // Create environment variable netstring + let mut environment_variables_to_wrap = Vec::new(); + for (key, value) in environment_variables.iter() { + let mut environment_variable = Vec::new(); + environment_variable.extend_from_slice(key.as_bytes()); + environment_variable.push(b'\0'); + environment_variable.extend_from_slice(value.as_bytes()); + environment_variable.push(b'\0'); + if key == "CONTENT_LENGTH" { + environment_variable.append(&mut environment_variables_to_wrap); + environment_variables_to_wrap = environment_variable; + } else { + environment_variables_to_wrap.append(&mut environment_variable); + } + } + + let environment_variables_to_wrap_length = environment_variables_to_wrap.len(); + let mut environment_variables_netstring = Vec::new(); + environment_variables_netstring + .extend_from_slice(environment_variables_to_wrap_length.to_string().as_bytes()); + environment_variables_netstring.push(b':'); + environment_variables_netstring.append(&mut environment_variables_to_wrap); + environment_variables_netstring.push(b','); + + // Write environment variable netstring + socket_writer + .write_all(&environment_variables_netstring) + .await?; + + let cgi_stdin_reader = StreamReader::new(body.into_data_stream().map_err(std::io::Error::other)); + + // Emulated standard input and standard output + // SCGI doesn't support standard error + let stdin = socket_writer; + let stdout = socket_reader; + + let mut cgi_response = CgiResponse::new(stdout); + + let stdin_copy_future = Copier::new(cgi_stdin_reader, stdin).copy(); + let mut stdin_copy_future_pinned = Box::pin(stdin_copy_future); + + let mut headers = [EMPTY_HEADER; 128]; + + let mut early_stdin_copied = false; + + // Needed to wrap this in another scope to prevent errors with multiple mutable borrows. 
+ { + let mut head_obtained = false; + let stdout_parse_future = cgi_response.get_head(); + tokio::pin!(stdout_parse_future); + + // Cannot use a loop with tokio::select, since stdin_copy_future_pinned being constantly ready will make the web server stop responding to HTTP requests + tokio::select! { + biased; + + obtained_head = &mut stdout_parse_future => { + let obtained_head = obtained_head?; + if !obtained_head.is_empty() { + httparse::parse_headers(obtained_head, &mut headers)?; + } + head_obtained = true; + }, + result = &mut stdin_copy_future_pinned => { + early_stdin_copied = true; + result?; + } + } + + if !head_obtained { + // Kept it same as in the tokio::select macro + let obtained_head = stdout_parse_future.await?; + if !obtained_head.is_empty() { + httparse::parse_headers(obtained_head, &mut headers)?; + } + } + } + + let mut response_builder = Response::builder(); + let mut status_code = 200; + for header in headers { + if header == EMPTY_HEADER { + break; + } + let mut is_status_header = false; + match &header.name.to_lowercase() as &str { + "location" => { + if !(300..=399).contains(&status_code) { + status_code = 302; + } + } + "status" => { + is_status_header = true; + let header_value_cow = String::from_utf8_lossy(header.value); + let mut split_status = header_value_cow.split(" "); + let first_part = split_status.next(); + if let Some(first_part) = first_part { + if first_part.starts_with("HTTP/") { + let second_part = split_status.next(); + if let Some(second_part) = second_part { + if let Ok(parsed_status_code) = second_part.parse::() { + status_code = parsed_status_code; + } + } + } else if let Ok(parsed_status_code) = first_part.parse::() { + status_code = parsed_status_code; + } + } + } + _ => (), + } + if !is_status_header { + response_builder = response_builder.header(header.name, header.value); + } + } + + response_builder = response_builder.status(status_code); + + let reader_stream = ReaderStream::new(cgi_response); + let stream_body 
= StreamBody::new(reader_stream.map_ok(Frame::data)); + let boxed_body = stream_body.boxed(); + + let response = response_builder.body(boxed_body)?; + + Ok( + ResponseData::builder_without_request() + .response(response) + .parallel_fn(async move { + if !early_stdin_copied { + stdin_copy_future_pinned.await.unwrap_or_default(); + } + }) + .build(), + ) +} + +async fn connect_tcp( + addr: &str, +) -> Result< + ( + Box, + Box, + ), + tokio::io::Error, +> { + let socket = TcpStream::connect(addr).await?; + socket.set_nodelay(true)?; + + let (socket_reader_set, socket_writer_set) = tokio::io::split(socket); + Ok((Box::new(socket_reader_set), Box::new(socket_writer_set))) +} + +#[allow(dead_code)] +#[cfg(unix)] +async fn connect_unix( + path: &str, +) -> Result< + ( + Box, + Box, + ), + tokio::io::Error, +> { + use tokio::net::UnixStream; + + let socket = UnixStream::connect(path).await?; + + let (socket_reader_set, socket_writer_set) = tokio::io::split(socket); + Ok((Box::new(socket_reader_set), Box::new(socket_writer_set))) +} + +#[allow(dead_code)] +#[cfg(not(unix))] +async fn connect_unix( + _path: &str, +) -> Result< + ( + Box, + Box, + ), + tokio::io::Error, +> { + Err(tokio::io::Error::new( + tokio::io::ErrorKind::Unsupported, + "Unix sockets are not supports on non-Unix platforms.", + )) +} diff --git a/ferron/src/optional_modules/wsgi.rs b/ferron/src/optional_modules/wsgi.rs new file mode 100644 index 0000000000000000000000000000000000000000..66ba5a7baeeaef403b71d0d9dc42bfb5710f9483 --- /dev/null +++ b/ferron/src/optional_modules/wsgi.rs @@ -0,0 +1,742 @@ +// This module would provide higher WSGI application performance, +// if it used a process pool dedicated for WSGI applications (aka pre-fork model) +// instead of spawning blocking threads in a Tokio runtime, +// because of Python's GIL, which causes the WSGI application in current setup +// to effectively run as single-threaded single-process. +// Pre-forking a process pool isn't supported on Windows. 
+ +use std::collections::HashMap; +use std::error::Error; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::Arc; + +use crate::ferron_common::{ + ErrorLogger, HyperRequest, HyperUpgraded, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperResponse, WithRuntime}; +use crate::ferron_res::server_software::SERVER_SOFTWARE; +use crate::ferron_util::ip_match::ip_match; +use crate::ferron_util::match_hostname::match_hostname; +use crate::ferron_util::match_location::match_location; +use crate::ferron_util::wsgi_error_stream::WsgiErrorStream; +use crate::ferron_util::wsgi_input_stream::WsgiInputStream; +use crate::ferron_util::wsgi_load_application::load_wsgi_application; +use crate::ferron_util::wsgi_structs::{WsgiApplicationLocationWrap, WsgiApplicationWrap}; +use async_trait::async_trait; +use futures_util::{StreamExt, TryStreamExt}; +use hashlink::LinkedHashMap; +use http::{HeaderMap, HeaderName, HeaderValue, StatusCode}; +use http_body_util::{BodyExt, Empty, StreamBody}; +use hyper::body::{Bytes, Frame}; +use hyper::header; +use hyper::Response; +use hyper_tungstenite::HyperWebsocket; +use pyo3::exceptions::{PyAssertionError, PyException}; +use pyo3::prelude::*; +use pyo3::types::{PyAny, PyBool, PyCFunction, PyDict, PyIterator, PyString, PyTuple}; +use tokio::fs; +use tokio::io::AsyncReadExt; +use tokio::runtime::Handle; +use tokio::sync::Mutex; +use tokio_util::io::StreamReader; + +pub fn server_module_init( + config: &ServerConfig, +) -> Result, Box> { + let mut global_wsgi_application = None; + let mut host_wsgi_applications = Vec::new(); + let clear_sys_path = config["global"]["wsgiClearModuleImportPath"] + .as_bool() + .unwrap_or(false); + if let Some(wsgi_application_path) = config["global"]["wsgiApplicationPath"].as_str() { + global_wsgi_application = Some(Arc::new(load_wsgi_application( + PathBuf::from_str(wsgi_application_path)?.as_path(), + clear_sys_path, + 
)?)); + } + let global_wsgi_path = config["global"]["wsgiPath"].as_str().map(|s| s.to_string()); + + if let Some(hosts) = config["hosts"].as_vec() { + for host_yaml in hosts.iter() { + let domain = host_yaml["domain"].as_str().map(String::from); + let ip = host_yaml["ip"].as_str().map(String::from); + let mut locations = Vec::new(); + if let Some(locations_yaml) = host_yaml["locations"].as_vec() { + for location_yaml in locations_yaml.iter() { + if let Some(path_str) = location_yaml["path"].as_str() { + let path = String::from(path_str); + if let Some(wsgi_application_path) = location_yaml["wsgiApplicationPath"].as_str() { + locations.push(WsgiApplicationLocationWrap::new( + path, + Arc::new(load_wsgi_application( + PathBuf::from_str(wsgi_application_path)?.as_path(), + clear_sys_path, + )?), + location_yaml["wsgiPath"].as_str().map(|s| s.to_string()), + )); + } + } + } + } + if let Some(wsgi_application_path) = host_yaml["wsgiApplicationPath"].as_str() { + host_wsgi_applications.push(WsgiApplicationWrap::new( + domain, + ip, + Some(Arc::new(load_wsgi_application( + PathBuf::from_str(wsgi_application_path)?.as_path(), + clear_sys_path, + )?)), + host_yaml["wsgiPath"].as_str().map(|s| s.to_string()), + locations, + )); + } else if !locations.is_empty() { + host_wsgi_applications.push(WsgiApplicationWrap::new( + domain, + ip, + None, + host_yaml["wsgiPath"].as_str().map(|s| s.to_string()), + locations, + )); + } + } + } + + Ok(Box::new(WsgiModule::new( + global_wsgi_application, + global_wsgi_path, + Arc::new(host_wsgi_applications), + ))) +} + +struct WsgiModule { + global_wsgi_application: Option>>, + global_wsgi_path: Option, + host_wsgi_applications: Arc>, +} + +impl WsgiModule { + fn new( + global_wsgi_application: Option>>, + global_wsgi_path: Option, + host_wsgi_applications: Arc>, + ) -> Self { + Self { + global_wsgi_application, + global_wsgi_path, + host_wsgi_applications, + } + } +} + +impl ServerModule for WsgiModule { + fn get_handlers(&self, handle: 
Handle) -> Box { + Box::new(WsgiModuleHandlers { + handle, + global_wsgi_application: self.global_wsgi_application.clone(), + global_wsgi_path: self.global_wsgi_path.clone(), + host_wsgi_applications: self.host_wsgi_applications.clone(), + }) + } +} + +struct WsgiModuleHandlers { + handle: Handle, + global_wsgi_application: Option>>, + global_wsgi_path: Option, + host_wsgi_applications: Arc>, +} + +#[async_trait] +impl ServerModuleHandlers for WsgiModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let hyper_request = request.get_hyper_request(); + + // Use .take() instead of .clone(), since the values in Options will only be used once. + let mut wsgi_application = self.global_wsgi_application.take(); + let mut wsgi_path = self.global_wsgi_path.take(); + + // Should have used a HashMap instead of iterating over an array for better performance... 
+ for host_wsgi_application_wrap in self.host_wsgi_applications.iter() { + if match_hostname( + match &host_wsgi_application_wrap.domain { + Some(value) => Some(value as &str), + None => None, + }, + match hyper_request.headers().get(header::HOST) { + Some(value) => value.to_str().ok(), + None => None, + }, + ) && match &host_wsgi_application_wrap.ip { + Some(value) => ip_match(value as &str, socket_data.remote_addr.ip()), + None => true, + } { + wsgi_application = host_wsgi_application_wrap.wsgi_application.clone(); + wsgi_path = host_wsgi_application_wrap.wsgi_path.clone(); + if let Ok(path_decoded) = urlencoding::decode( + request + .get_original_url() + .unwrap_or(request.get_hyper_request().uri()) + .path(), + ) { + for location_wrap in host_wsgi_application_wrap.locations.iter() { + if match_location(&location_wrap.path, &path_decoded) { + wsgi_application = Some(location_wrap.wsgi_application.clone()); + wsgi_path = location_wrap.wsgi_path.clone(); + break; + } + } + } + break; + } + } + + let request_path = hyper_request.uri().path(); + let mut request_path_bytes = request_path.bytes(); + if request_path_bytes.len() < 1 || request_path_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + if let Some(wsgi_application) = wsgi_application { + let wsgi_path = wsgi_path.unwrap_or("/".to_string()); + let mut canonical_wsgi_path: &str = &wsgi_path; + if canonical_wsgi_path.bytes().last() == Some(b'/') { + canonical_wsgi_path = &canonical_wsgi_path[..(canonical_wsgi_path.len() - 1)]; + } + + let request_path_with_slashes = match request_path == canonical_wsgi_path { + true => format!("{}/", request_path), + false => request_path.to_string(), + }; + if let Some(stripped_request_path) = + request_path_with_slashes.strip_prefix(canonical_wsgi_path) + { + let wwwroot_yaml = &config["wwwroot"]; + let wwwroot = wwwroot_yaml.as_str().unwrap_or("/nonexistent"); + + let wwwroot_unknown = 
PathBuf::from(wwwroot); + let wwwroot_pathbuf = match wwwroot_unknown.as_path().is_absolute() { + true => wwwroot_unknown, + false => match fs::canonicalize(&wwwroot_unknown).await { + Ok(pathbuf) => pathbuf, + Err(_) => wwwroot_unknown, + }, + }; + let wwwroot = wwwroot_pathbuf.as_path(); + + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let joined_pathbuf = wwwroot.join(decoded_relative_path); + let execute_pathbuf = joined_pathbuf; + let execute_path_info = stripped_request_path + .strip_prefix("/") + .map(|s| s.to_string()); + + return execute_wsgi_with_environment_variables( + request, + socket_data, + error_logger, + wwwroot, + execute_pathbuf, + execute_path_info, + config["serverAdministratorEmail"].as_str(), + wsgi_application, + ) + .await; + } + } + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, 
+ _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} + +struct ResponseHead { + status: StatusCode, + headers: Option, + is_set: bool, + is_sent: bool, +} + +impl ResponseHead { + fn new() -> Self { + Self { + status: StatusCode::OK, + headers: None, + is_set: false, + is_sent: false, + } + } +} + +#[allow(clippy::too_many_arguments)] +async fn execute_wsgi_with_environment_variables( + request: RequestData, + socket_data: &SocketData, + error_logger: &ErrorLogger, + wwwroot: &Path, + execute_pathbuf: PathBuf, + path_info: Option, + server_administrator_email: Option<&str>, + wsgi_application: Arc>, +) -> Result> { + let mut environment_variables: LinkedHashMap = LinkedHashMap::new(); + + let hyper_request = request.get_hyper_request(); + let original_request_uri = request.get_original_url().unwrap_or(hyper_request.uri()); + + if let Some(auth_user) = request.get_auth_user() { + if let Some(authorization) = hyper_request.headers().get(header::AUTHORIZATION) { + let authorization_value = String::from_utf8_lossy(authorization.as_bytes()).to_string(); + let mut authorization_value_split = authorization_value.split(" "); + if let Some(authorization_type) = authorization_value_split.next() { + environment_variables.insert("AUTH_TYPE".to_string(), authorization_type.to_string()); + } + } + environment_variables.insert("REMOTE_USER".to_string(), auth_user.to_string()); + } + + environment_variables.insert( + "QUERY_STRING".to_string(), + match hyper_request.uri().query() { + Some(query) => query.to_string(), + None => "".to_string(), + }, + ); + + environment_variables.insert("SERVER_SOFTWARE".to_string(), SERVER_SOFTWARE.to_string()); + environment_variables.insert( + "SERVER_PROTOCOL".to_string(), + match hyper_request.version() { + 
hyper::Version::HTTP_09 => "HTTP/0.9".to_string(), + hyper::Version::HTTP_10 => "HTTP/1.0".to_string(), + hyper::Version::HTTP_11 => "HTTP/1.1".to_string(), + hyper::Version::HTTP_2 => "HTTP/2.0".to_string(), + hyper::Version::HTTP_3 => "HTTP/3.0".to_string(), + _ => "HTTP/Unknown".to_string(), + }, + ); + environment_variables.insert( + "SERVER_PORT".to_string(), + socket_data.local_addr.port().to_string(), + ); + environment_variables.insert( + "SERVER_ADDR".to_string(), + socket_data.local_addr.ip().to_canonical().to_string(), + ); + if let Some(server_administrator_email) = server_administrator_email { + environment_variables.insert( + "SERVER_ADMIN".to_string(), + server_administrator_email.to_string(), + ); + } + if let Some(host) = hyper_request.headers().get(header::HOST) { + environment_variables.insert( + "SERVER_NAME".to_string(), + String::from_utf8_lossy(host.as_bytes()).to_string(), + ); + } + + environment_variables.insert( + "DOCUMENT_ROOT".to_string(), + wwwroot.to_string_lossy().to_string(), + ); + environment_variables.insert( + "PATH_INFO".to_string(), + match &path_info { + Some(path_info) => format!("/{}", path_info), + None => "".to_string(), + }, + ); + environment_variables.insert( + "PATH_TRANSLATED".to_string(), + match &path_info { + Some(path_info) => { + let mut path_translated = execute_pathbuf.clone(); + path_translated.push(path_info); + path_translated.to_string_lossy().to_string() + } + None => "".to_string(), + }, + ); + environment_variables.insert( + "REQUEST_METHOD".to_string(), + hyper_request.method().to_string(), + ); + environment_variables.insert("GATEWAY_INTERFACE".to_string(), "CGI/1.1".to_string()); + environment_variables.insert( + "REQUEST_URI".to_string(), + format!( + "{}{}", + original_request_uri.path(), + match original_request_uri.query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ), + ); + + environment_variables.insert( + "REMOTE_PORT".to_string(), + 
socket_data.remote_addr.port().to_string(), + ); + environment_variables.insert( + "REMOTE_ADDR".to_string(), + socket_data.remote_addr.ip().to_canonical().to_string(), + ); + + environment_variables.insert( + "SCRIPT_FILENAME".to_string(), + execute_pathbuf.to_string_lossy().to_string(), + ); + if let Ok(script_path) = execute_pathbuf.as_path().strip_prefix(wwwroot) { + environment_variables.insert( + "SCRIPT_NAME".to_string(), + format!( + "/{}", + match cfg!(windows) { + true => script_path.to_string_lossy().to_string().replace("\\", "/"), + false => script_path.to_string_lossy().to_string(), + } + ), + ); + } + + if socket_data.encrypted { + environment_variables.insert("HTTPS".to_string(), "ON".to_string()); + } + + let mut content_length_set = false; + for (header_name, header_value) in hyper_request.headers().iter() { + let env_header_name = match *header_name { + header::CONTENT_LENGTH => { + content_length_set = true; + "CONTENT_LENGTH".to_string() + } + header::CONTENT_TYPE => "CONTENT_TYPE".to_string(), + _ => { + let mut result = String::new(); + + result.push_str("HTTP_"); + + for c in header_name.as_str().to_uppercase().chars() { + if c.is_alphanumeric() { + result.push(c); + } else { + result.push('_'); + } + } + + result + } + }; + if environment_variables.contains_key(&env_header_name) { + let value = environment_variables.get_mut(&env_header_name); + if let Some(value) = value { + if env_header_name == "HTTP_COOKIE" { + value.push_str("; "); + } else { + // See https://stackoverflow.com/a/1801191 + value.push_str(", "); + } + value.push_str(String::from_utf8_lossy(header_value.as_bytes()).as_ref()); + } else { + environment_variables.insert( + env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } else { + environment_variables.insert( + env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } + + if !content_length_set { + 
environment_variables.insert("CONTENT_LENGTH".to_string(), "0".to_string()); + } + + let (hyper_request, _, _) = request.into_parts(); + + execute_wsgi( + hyper_request, + error_logger, + wsgi_application, + environment_variables, + ) + .await +} + +async fn execute_wsgi( + hyper_request: HyperRequest, + error_logger: &ErrorLogger, + wsgi_application: Arc>, + environment_variables: LinkedHashMap, +) -> Result> { + let (_, body) = hyper_request.into_parts(); + let body_reader = StreamReader::new(body.into_data_stream().map_err(std::io::Error::other)); + let wsgi_head = Arc::new(Mutex::new(ResponseHead::new())); + let wsgi_head_clone = wsgi_head.clone(); + let error_logger_owned = error_logger.to_owned(); + let body_iterator = tokio::task::spawn_blocking(move || { + Python::with_gil(move |py| -> PyResult> { + let start_response = PyCFunction::new_closure( + py, + None, + None, + move |args: &Bound<'_, PyTuple>, kwargs: Option<&Bound<'_, PyDict>>| -> PyResult<_> { + let args_native = args.extract::<(String, Vec<(String, String)>)>()?; + let exc_info = kwargs.map_or(Ok(None), |kwargs| { + let exc_info = kwargs.get_item("exc_info"); + if let Ok(Some(exc_info)) = exc_info { + if exc_info.is_none() { + Ok(None) + } else { + Ok(Some(exc_info)) + } + } else { + exc_info + } + })?; + let mut wsgi_head_locked = wsgi_head_clone.blocking_lock(); + if let Some(exc_info) = exc_info { + if wsgi_head_locked.is_sent { + let exc_info_tuple = exc_info.downcast::()?; + let exc_info_exception = exc_info_tuple + .get_item(1)? + .getattr("with_traceback")? + .call((exc_info_tuple.get_item(2)?,), None)? + .downcast::()? + .clone(); + Err(exc_info_exception)? + } + } else if wsgi_head_locked.is_set { + Err(PyAssertionError::new_err("Headers already set"))? 
+ } + let status_code_string_option = args_native.0.split(" ").next(); + if let Some(status_code_string) = status_code_string_option { + wsgi_head_locked.status = + StatusCode::from_u16(status_code_string.parse()?).map_err(|e| anyhow::anyhow!(e))?; + } else { + Err(anyhow::anyhow!("Can't extract status code"))?; + } + let mut header_map = HeaderMap::new(); + for header in args_native.1 { + header_map.append( + HeaderName::from_str(&header.0).map_err(|e| anyhow::anyhow!(e))?, + HeaderValue::from_str(&header.1).map_err(|e| anyhow::anyhow!(e))?, + ); + } + wsgi_head_locked.headers = Some(header_map); + wsgi_head_locked.is_set = true; + Ok(()) + }, + )?; + let mut environment: HashMap> = HashMap::new(); + let is_https = environment_variables.contains_key("HTTPS"); + let content_length = if let Some(content_length) = environment_variables.get("CONTENT_LENGTH") + { + content_length.parse::().ok() + } else { + None + }; + for (environment_variable, environment_variable_value) in environment_variables { + environment.insert( + environment_variable, + PyString::new(py, &environment_variable_value).into_any(), + ); + } + environment.insert( + "wsgi.version".to_string(), + PyTuple::new(py, [1, 0])?.into_any(), + ); + environment.insert( + "wsgi.url_scheme".to_string(), + PyString::new(py, if is_https { "https" } else { "http" }).into_any(), + ); + environment.insert( + "wsgi.input".to_string(), + (if let Some(content_length) = content_length { + WsgiInputStream::new(body_reader.take(content_length)) + } else { + WsgiInputStream::new(body_reader) + }) + .into_pyobject(py)? + .into_any(), + ); + environment.insert( + "wsgi.errors".to_string(), + WsgiErrorStream::new(error_logger_owned) + .into_pyobject(py)? 
+ .into_any(), + ); + environment.insert( + "wsgi.multithread".to_string(), + PyBool::new(py, true).as_any().clone(), + ); + environment.insert( + "wsgi.multiprocess".to_string(), + PyBool::new(py, false).as_any().clone(), + ); + environment.insert( + "wsgi.run_once".to_string(), + PyBool::new(py, false).as_any().clone(), + ); + let body_unknown = wsgi_application.call(py, (environment, start_response), None)?; + let body_iterator = body_unknown + .downcast_bound::(py)? + .clone() + .unbind(); + Ok(body_iterator) + }) + }) + .await??; + + let wsgi_head_clone = wsgi_head.clone(); + let mut response_stream = + futures_util::stream::unfold(Arc::new(body_iterator), move |body_iterator_arc| { + let wsgi_head_clone = wsgi_head_clone.clone(); + Box::pin(async move { + let body_iterator_arc_clone = body_iterator_arc.clone(); + let blocking_thread_result = tokio::task::spawn_blocking(move || { + Python::with_gil(|py| -> PyResult> { + let mut body_iterator_bound = body_iterator_arc_clone.bind(py).clone(); + if let Some(body_chunk) = body_iterator_bound.next() { + Ok(Some(Bytes::from(body_chunk?.extract::>()?))) + } else { + Ok(None) + } + }) + }) + .await; + + match blocking_thread_result { + Err(error) => Some((Err(std::io::Error::other(error)), body_iterator_arc)), + Ok(Err(error)) => Some((Err(std::io::Error::other(error)), body_iterator_arc)), + Ok(Ok(None)) => None, + Ok(Ok(Some(chunk))) => { + let wsgi_head_locked = wsgi_head_clone.lock().await; + if !wsgi_head_locked.is_set { + Some(( + Err(std::io::Error::other( + "The \"start_response\" function hasn't been called.", + )), + body_iterator_arc, + )) + } else { + Some((Ok(chunk), body_iterator_arc)) + } + } + } + }) + }); + + let first_chunk = response_stream.next().await; + let response_body = if let Some(Err(first_chunk_error)) = first_chunk { + Err(first_chunk_error)? 
+ } else if let Some(Ok(first_chunk)) = first_chunk { + let response_stream_first_item = futures_util::stream::once(async move { Ok(first_chunk) }); + let response_stream_combined = response_stream_first_item.chain(response_stream); + let stream_body = StreamBody::new(response_stream_combined.map_ok(Frame::data)); + + BodyExt::boxed(stream_body) + } else { + BodyExt::boxed(Empty::new().map_err(|e| match e {})) + }; + + let mut wsgi_head_locked = wsgi_head.lock().await; + let mut hyper_response = Response::new(response_body); + *hyper_response.status_mut() = wsgi_head_locked.status; + if let Some(headers) = wsgi_head_locked.headers.take() { + *hyper_response.headers_mut() = headers; + } + wsgi_head_locked.is_sent = true; + + Ok( + ResponseData::builder_without_request() + .response(hyper_response) + .build(), + ) +} diff --git a/ferron/src/optional_modules/wsgid.rs b/ferron/src/optional_modules/wsgid.rs new file mode 100644 index 0000000000000000000000000000000000000000..df9c0f3fd2309b8daf83a8f801f1cfba46c1f9f9 --- /dev/null +++ b/ferron/src/optional_modules/wsgid.rs @@ -0,0 +1,1035 @@ +#[cfg(not(unix))] +compile_error!("This module is supported only on Unix and Unix-like systems."); + +use std::collections::HashMap; +use std::error::Error; +use std::io::{BufReader, Read}; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::Arc; +use std::thread; + +use crate::ferron_common::{ + ErrorLogger, HyperRequest, HyperUpgraded, RequestData, ResponseData, ServerConfig, ServerModule, + ServerModuleHandlers, SocketData, +}; +use crate::ferron_common::{HyperResponse, WithRuntime}; +use crate::ferron_res::server_software::SERVER_SOFTWARE; +use crate::ferron_util::ip_match::ip_match; +use crate::ferron_util::match_hostname::match_hostname; +use crate::ferron_util::match_location::match_location; +use crate::ferron_util::preforked_process_pool::{ + read_ipc_message, read_ipc_message_async, write_ipc_message, write_ipc_message_async, + PreforkedProcessPool, +}; 
+use crate::ferron_util::wsgi_load_application::load_wsgi_application; +use crate::ferron_util::wsgid_body_reader::WsgidBodyReader; +use crate::ferron_util::wsgid_error_stream::WsgidErrorStream; +use crate::ferron_util::wsgid_input_stream::WsgidInputStream; +use crate::ferron_util::wsgid_message_structs::{ + ProcessPoolToServerMessage, ServerToProcessPoolMessage, +}; +use crate::ferron_util::wsgid_structs::{WsgidApplicationLocationWrap, WsgidApplicationWrap}; +use async_trait::async_trait; +use futures_util::{StreamExt, TryStreamExt}; +use hashlink::LinkedHashMap; +use http::{HeaderMap, HeaderName, HeaderValue, StatusCode}; +use http_body_util::{BodyExt, Empty, StreamBody}; +use hyper::body::{Bytes, Frame}; +use hyper::header; +use hyper::Response; +use hyper_tungstenite::HyperWebsocket; +use interprocess::unnamed_pipe::{Recver, Sender}; +use pyo3::exceptions::{PyAssertionError, PyException}; +use pyo3::prelude::*; +use pyo3::types::{PyBool, PyCFunction, PyDict, PyIterator, PyString, PyTuple}; +//use postcard::{DeOptions, SerOptions}; +use tokio::fs; +use tokio::runtime::Handle; +use tokio::sync::Mutex; + +struct ResponseHead { + status: u16, + headers: Option>>, + is_set: bool, + is_sent: bool, +} + +impl ResponseHead { + fn new() -> Self { + Self { + status: 200, + headers: None, + is_set: false, + is_sent: false, + } + } +} + +fn wsgi_pool_fn(tx: Sender, rx: Recver, wsgi_script_path: PathBuf) { + let wsgi_application_result: Result, Box> = + load_wsgi_application(wsgi_script_path.as_path(), false); + let mut body_iterators = HashMap::new(); + let mut application_id = 0; + let mut wsgi_head = Arc::new(Mutex::new(ResponseHead::new())); + let rx_mutex = Arc::new(Mutex::new(rx)); + let tx_mutex = Arc::new(Mutex::new(tx)); + + loop { + let received_raw_message = match read_ipc_message(&mut rx_mutex.blocking_lock()) { + Ok(message) => message, + Err(_) => break, + }; + + let received_message = + match postcard::from_bytes::(&received_raw_message) { + Ok(message) => 
message, + Err(_) => continue, + }; + + if let Some(error) = (|| -> Result<(), Box> { + let wsgi_application = wsgi_application_result + .as_ref() + .map_err(|x| anyhow::anyhow!(x.to_string()))?; + if let Some(environment_variables) = received_message.environment_variables { + wsgi_head = Arc::new(Mutex::new(ResponseHead::new())); + let wsgi_head_clone = wsgi_head.clone(); + let tx_mutex_clone = tx_mutex.clone(); + let rx_mutex_clone = rx_mutex.clone(); + let body_iterator = Python::with_gil(move |py| -> PyResult> { + let start_response = PyCFunction::new_closure( + py, + None, + None, + move |args: &Bound<'_, PyTuple>, kwargs: Option<&Bound<'_, PyDict>>| -> PyResult<_> { + let args_native = args.extract::<(String, Vec<(String, String)>)>()?; + let exc_info = kwargs.map_or(Ok(None), |kwargs| { + let exc_info = kwargs.get_item("exc_info"); + if let Ok(Some(exc_info)) = exc_info { + if exc_info.is_none() { + Ok(None) + } else { + Ok(Some(exc_info)) + } + } else { + exc_info + } + })?; + let mut wsgi_head_locked = wsgi_head_clone.blocking_lock(); + if let Some(exc_info) = exc_info { + if wsgi_head_locked.is_sent { + let exc_info_tuple = exc_info.downcast::()?; + let exc_info_exception = exc_info_tuple + .get_item(1)? + .getattr("with_traceback")? + .call((exc_info_tuple.get_item(2)?,), None)? + .downcast::()? + .clone(); + Err(exc_info_exception)? + } + } else if wsgi_head_locked.is_set { + Err(PyAssertionError::new_err("Headers already set"))? 
+ } + let status_code_string_option = args_native.0.split(" ").next(); + if let Some(status_code_string) = status_code_string_option { + wsgi_head_locked.status = status_code_string + .parse() + .map_err(|e: std::num::ParseIntError| anyhow::anyhow!(e))?; + } else { + Err(anyhow::anyhow!("Can't extract status code"))?; + } + let mut header_map: LinkedHashMap> = LinkedHashMap::new(); + for header in args_native.1 { + let header_name = header.0.to_lowercase(); + let header_value = header.1; + if let Some(header_values) = header_map.get_mut(&header_name) { + header_values.push(header_value); + } else { + header_map.insert(header_name, vec![header_value]); + } + } + wsgi_head_locked.headers = Some(header_map); + wsgi_head_locked.is_set = true; + Ok(()) + }, + )?; + let mut environment: HashMap> = HashMap::new(); + let is_https = environment_variables.contains_key("HTTPS"); + let content_length = + if let Some(content_length) = environment_variables.get("CONTENT_LENGTH") { + content_length.parse::().ok() + } else { + None + }; + for (environment_variable, environment_variable_value) in environment_variables { + environment.insert( + environment_variable, + PyString::new(py, &environment_variable_value).into_any(), + ); + } + environment.insert( + "wsgi.version".to_string(), + PyTuple::new(py, [1, 0])?.into_any(), + ); + environment.insert( + "wsgi.url_scheme".to_string(), + PyString::new(py, if is_https { "https" } else { "http" }).into_any(), + ); + environment.insert( + "wsgi.input".to_string(), + (if let Some(content_length) = content_length { + WsgidInputStream::new( + BufReader::new(WsgidBodyReader::new( + tx_mutex_clone.clone(), + rx_mutex_clone.clone(), + )) + .take(content_length), + ) + } else { + WsgidInputStream::new(BufReader::new(WsgidBodyReader::new( + tx_mutex_clone.clone(), + rx_mutex_clone.clone(), + ))) + }) + .into_pyobject(py)? 
+ .into_any(), + ); + environment.insert( + "wsgi.errors".to_string(), + WsgidErrorStream::new(tx_mutex_clone.clone()) + .into_pyobject(py)? + .into_any(), + ); + environment.insert( + "wsgi.multithread".to_string(), + PyBool::new(py, false).as_any().clone(), + ); + environment.insert( + "wsgi.multiprocess".to_string(), + PyBool::new(py, true).as_any().clone(), + ); + environment.insert( + "wsgi.run_once".to_string(), + PyBool::new(py, false).as_any().clone(), + ); + let body_unknown = wsgi_application.call(py, (environment, start_response), None)?; + let body_iterator = body_unknown + .downcast_bound::(py)? + .clone() + .unbind(); + Ok(body_iterator) + })?; + let current_application_id = application_id; + body_iterators.insert(current_application_id, Arc::new(body_iterator)); + application_id += 1; + write_ipc_message( + &mut tx_mutex.blocking_lock(), + &postcard::to_allocvec::(&ProcessPoolToServerMessage { + application_id: Some(current_application_id), + status_code: None, + headers: None, + body_chunk: None, + error_log_line: None, + error_message: None, + requests_body_chunk: false, + })?, + )? 
+ } else if received_message.requests_body_chunk { + if let Some(application_id) = received_message.application_id { + if let Some(body_iterator_arc) = body_iterators.get(&application_id) { + let wsgi_head_clone = wsgi_head.clone(); + let body_iterator_arc_clone = body_iterator_arc.clone(); + let body_chunk_result = Python::with_gil(|py| -> PyResult>> { + let mut body_iterator_bound = body_iterator_arc_clone.bind(py).clone(); + if let Some(body_chunk) = body_iterator_bound.next() { + Ok(Some(body_chunk?.extract::>()?)) + } else { + Ok(None) + } + }); + + let body_chunk = (match body_chunk_result { + Err(error) => Err(std::io::Error::other(error)), + Ok(None) => Ok(None), + Ok(Some(chunk)) => { + let wsgi_head_locked = wsgi_head_clone.blocking_lock(); + if !wsgi_head_locked.is_set { + Err(std::io::Error::other( + "The \"start_response\" function hasn't been called.", + )) + } else { + Ok(Some(chunk)) + } + } + })?; + + let status_code; + let headers; + + let mut wsgi_head_locked = wsgi_head_clone.blocking_lock(); + if wsgi_head_locked.is_sent { + status_code = None; + headers = None; + } else { + status_code = Some(wsgi_head_locked.status); + headers = wsgi_head_locked.headers.take(); + wsgi_head_locked.is_sent = true; + } + drop(wsgi_head_locked); + + if body_chunk.is_none() { + body_iterators.remove(&application_id); + } + + write_ipc_message( + &mut tx_mutex.blocking_lock(), + &postcard::to_allocvec::(&ProcessPoolToServerMessage { + application_id: None, + status_code, + headers, + body_chunk, + error_log_line: None, + error_message: None, + requests_body_chunk: false, + })?, + )? + } else { + Err(anyhow::anyhow!("The WSGI request wasn't initialized"))? + } + } else { + Err(anyhow::anyhow!("The WSGI request wasn't initialized"))? 
+ } + } + + Ok(()) + })() + .err() + { + if write_ipc_message( + &mut tx_mutex.blocking_lock(), + &postcard::to_allocvec::(&ProcessPoolToServerMessage { + application_id: None, + status_code: None, + headers: None, + body_chunk: None, + error_log_line: None, + error_message: Some(error.to_string()), + requests_body_chunk: false, + }) + .unwrap_or_default(), + ) + .is_err() + { + break; + } + } + } +} + +fn init_wsgi_process_pool( + wsgi_script_path: PathBuf, +) -> Result> { + let available_parallelism = thread::available_parallelism()?.get(); + // Safety: The function depends on `nix::unistd::fork`, which is executed before any threads are spawned. + // The forking function is safe to call for single-threaded applications. + unsafe { + PreforkedProcessPool::new(available_parallelism, move |tx, rx| { + let wsgi_script_path_clone = wsgi_script_path.clone(); + wsgi_pool_fn(tx, rx, wsgi_script_path_clone) + }) + } +} + +pub fn server_module_init( + config: &ServerConfig, +) -> Result, Box> { + let mut global_wsgi_process_pool = None; + let mut host_wsgi_process_pools = Vec::new(); + if let Some(wsgi_process_pool_path) = config["global"]["wsgidApplicationPath"].as_str() { + global_wsgi_process_pool = Some(Arc::new(init_wsgi_process_pool(PathBuf::from_str( + wsgi_process_pool_path, + )?)?)); + } + let global_wsgi_path = config["global"]["wsgidPath"] + .as_str() + .map(|s| s.to_string()); + + if let Some(hosts) = config["hosts"].as_vec() { + for host_yaml in hosts.iter() { + let domain = host_yaml["domain"].as_str().map(String::from); + let ip = host_yaml["ip"].as_str().map(String::from); + let mut locations = Vec::new(); + if let Some(locations_yaml) = host_yaml["locations"].as_vec() { + for location_yaml in locations_yaml.iter() { + if let Some(path_str) = location_yaml["path"].as_str() { + let path = String::from(path_str); + if let Some(wsgi_process_pool_path) = location_yaml["wsgidApplicationPath"].as_str() { + locations.push(WsgidApplicationLocationWrap::new( + 
path, + Arc::new(init_wsgi_process_pool(PathBuf::from_str( + wsgi_process_pool_path, + )?)?), + location_yaml["wsgidPath"].as_str().map(|s| s.to_string()), + )); + } + } + } + } + if let Some(wsgi_process_pool_path) = host_yaml["wsgidApplicationPath"].as_str() { + host_wsgi_process_pools.push(WsgidApplicationWrap::new( + domain, + ip, + Some(Arc::new(init_wsgi_process_pool(PathBuf::from_str( + wsgi_process_pool_path, + )?)?)), + host_yaml["wsgiPath"].as_str().map(|s| s.to_string()), + locations, + )); + } else if !locations.is_empty() { + host_wsgi_process_pools.push(WsgidApplicationWrap::new( + domain, + ip, + None, + host_yaml["wsgiPath"].as_str().map(|s| s.to_string()), + locations, + )); + } + } + } + + Ok(Box::new(WsgidModule::new( + global_wsgi_process_pool, + global_wsgi_path, + Arc::new(host_wsgi_process_pools), + ))) +} + +struct WsgidModule { + global_wsgi_process_pool: Option>, + global_wsgi_path: Option, + host_wsgi_process_pools: Arc>, +} + +impl WsgidModule { + fn new( + global_wsgi_process_pool: Option>, + global_wsgi_path: Option, + host_wsgi_process_pools: Arc>, + ) -> Self { + Self { + global_wsgi_process_pool, + global_wsgi_path, + host_wsgi_process_pools, + } + } +} + +impl ServerModule for WsgidModule { + fn get_handlers(&self, handle: Handle) -> Box { + Box::new(WsgidModuleHandlers { + handle, + global_wsgi_process_pool: self.global_wsgi_process_pool.clone(), + global_wsgi_path: self.global_wsgi_path.clone(), + host_wsgi_process_pools: self.host_wsgi_process_pools.clone(), + }) + } +} + +struct WsgidModuleHandlers { + handle: Handle, + global_wsgi_process_pool: Option>, + global_wsgi_path: Option, + host_wsgi_process_pools: Arc>, +} + +#[async_trait] +impl ServerModuleHandlers for WsgidModuleHandlers { + async fn request_handler( + &mut self, + request: RequestData, + config: &ServerConfig, + socket_data: &SocketData, + error_logger: &ErrorLogger, + ) -> Result> { + WithRuntime::new(self.handle.clone(), async move { + let hyper_request = 
request.get_hyper_request(); + + // Use .take() instead of .clone(), since the values in Options will only be used once. + let mut wsgi_process_pool = self.global_wsgi_process_pool.take(); + let mut wsgi_path = self.global_wsgi_path.take(); + + // Should have used a HashMap instead of iterating over an array for better performance... + for host_wsgi_process_pool_wrap in self.host_wsgi_process_pools.iter() { + if match_hostname( + match &host_wsgi_process_pool_wrap.domain { + Some(value) => Some(value as &str), + None => None, + }, + match hyper_request.headers().get(header::HOST) { + Some(value) => value.to_str().ok(), + None => None, + }, + ) && match &host_wsgi_process_pool_wrap.ip { + Some(value) => ip_match(value as &str, socket_data.remote_addr.ip()), + None => true, + } { + wsgi_process_pool = host_wsgi_process_pool_wrap.wsgi_process_pool.clone(); + wsgi_path = host_wsgi_process_pool_wrap.wsgi_path.clone(); + if let Ok(path_decoded) = urlencoding::decode( + request + .get_original_url() + .unwrap_or(request.get_hyper_request().uri()) + .path(), + ) { + for location_wrap in host_wsgi_process_pool_wrap.locations.iter() { + if match_location(&location_wrap.path, &path_decoded) { + wsgi_process_pool = Some(location_wrap.wsgi_process_pool.clone()); + wsgi_path = location_wrap.wsgi_path.clone(); + break; + } + } + } + break; + } + } + + let request_path = hyper_request.uri().path(); + let mut request_path_bytes = request_path.bytes(); + if request_path_bytes.len() < 1 || request_path_bytes.nth(0) != Some(b'/') { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + + if let Some(wsgi_process_pool) = wsgi_process_pool { + let wsgi_path = wsgi_path.unwrap_or("/".to_string()); + let mut canonical_wsgi_path: &str = &wsgi_path; + if canonical_wsgi_path.bytes().last() == Some(b'/') { + canonical_wsgi_path = &canonical_wsgi_path[..(canonical_wsgi_path.len() - 1)]; + } + + let request_path_with_slashes = match request_path 
== canonical_wsgi_path { + true => format!("{}/", request_path), + false => request_path.to_string(), + }; + if let Some(stripped_request_path) = + request_path_with_slashes.strip_prefix(canonical_wsgi_path) + { + let wwwroot_yaml = &config["wwwroot"]; + let wwwroot = wwwroot_yaml.as_str().unwrap_or("/nonexistent"); + + let wwwroot_unknown = PathBuf::from(wwwroot); + let wwwroot_pathbuf = match wwwroot_unknown.as_path().is_absolute() { + true => wwwroot_unknown, + false => match fs::canonicalize(&wwwroot_unknown).await { + Ok(pathbuf) => pathbuf, + Err(_) => wwwroot_unknown, + }, + }; + let wwwroot = wwwroot_pathbuf.as_path(); + + let mut relative_path = &request_path[1..]; + while relative_path.as_bytes().first().copied() == Some(b'/') { + relative_path = &relative_path[1..]; + } + + let decoded_relative_path = match urlencoding::decode(relative_path) { + Ok(path) => path.to_string(), + Err(_) => { + return Ok( + ResponseData::builder(request) + .status(StatusCode::BAD_REQUEST) + .build(), + ); + } + }; + + let joined_pathbuf = wwwroot.join(decoded_relative_path); + let execute_pathbuf = joined_pathbuf; + let execute_path_info = stripped_request_path + .strip_prefix("/") + .map(|s| s.to_string()); + + return execute_wsgi_with_environment_variables( + request, + socket_data, + error_logger, + wwwroot, + execute_pathbuf, + execute_path_info, + config["serverAdministratorEmail"].as_str(), + wsgi_process_pool, + ) + .await; + } + } + Ok(ResponseData::builder(request).build()) + }) + .await + } + + async fn proxy_request_handler( + &mut self, + request: RequestData, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result> { + Ok(ResponseData::builder(request).build()) + } + + async fn response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn proxy_response_modifying_handler( + &mut self, + response: HyperResponse, + ) -> Result> { + Ok(response) + } + + async fn 
connect_proxy_request_handler( + &mut self, + _upgraded_request: HyperUpgraded, + _connect_address: &str, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_connect_proxy_requests(&mut self) -> bool { + false + } + + async fn websocket_request_handler( + &mut self, + _websocket: HyperWebsocket, + _uri: &hyper::Uri, + _config: &ServerConfig, + _socket_data: &SocketData, + _error_logger: &ErrorLogger, + ) -> Result<(), Box> { + Ok(()) + } + + fn does_websocket_requests(&mut self, _config: &ServerConfig, _socket_data: &SocketData) -> bool { + false + } +} + +struct ResponseHeadHyper { + status: StatusCode, + headers: Option, +} + +impl ResponseHeadHyper { + fn new() -> Self { + Self { + status: StatusCode::OK, + headers: None, + } + } +} + +#[allow(clippy::too_many_arguments)] +async fn execute_wsgi_with_environment_variables( + request: RequestData, + socket_data: &SocketData, + error_logger: &ErrorLogger, + wwwroot: &Path, + execute_pathbuf: PathBuf, + path_info: Option, + server_administrator_email: Option<&str>, + wsgi_process_pool: Arc, +) -> Result> { + let mut environment_variables: LinkedHashMap = LinkedHashMap::new(); + + let hyper_request = request.get_hyper_request(); + let original_request_uri = request.get_original_url().unwrap_or(hyper_request.uri()); + + if let Some(auth_user) = request.get_auth_user() { + if let Some(authorization) = hyper_request.headers().get(header::AUTHORIZATION) { + let authorization_value = String::from_utf8_lossy(authorization.as_bytes()).to_string(); + let mut authorization_value_split = authorization_value.split(" "); + if let Some(authorization_type) = authorization_value_split.next() { + environment_variables.insert("AUTH_TYPE".to_string(), authorization_type.to_string()); + } + } + environment_variables.insert("REMOTE_USER".to_string(), auth_user.to_string()); + } + + environment_variables.insert( + "QUERY_STRING".to_string(), + match 
hyper_request.uri().query() { + Some(query) => query.to_string(), + None => "".to_string(), + }, + ); + + environment_variables.insert("SERVER_SOFTWARE".to_string(), SERVER_SOFTWARE.to_string()); + environment_variables.insert( + "SERVER_PROTOCOL".to_string(), + match hyper_request.version() { + hyper::Version::HTTP_09 => "HTTP/0.9".to_string(), + hyper::Version::HTTP_10 => "HTTP/1.0".to_string(), + hyper::Version::HTTP_11 => "HTTP/1.1".to_string(), + hyper::Version::HTTP_2 => "HTTP/2.0".to_string(), + hyper::Version::HTTP_3 => "HTTP/3.0".to_string(), + _ => "HTTP/Unknown".to_string(), + }, + ); + environment_variables.insert( + "SERVER_PORT".to_string(), + socket_data.local_addr.port().to_string(), + ); + environment_variables.insert( + "SERVER_ADDR".to_string(), + socket_data.local_addr.ip().to_canonical().to_string(), + ); + if let Some(server_administrator_email) = server_administrator_email { + environment_variables.insert( + "SERVER_ADMIN".to_string(), + server_administrator_email.to_string(), + ); + } + if let Some(host) = hyper_request.headers().get(header::HOST) { + environment_variables.insert( + "SERVER_NAME".to_string(), + String::from_utf8_lossy(host.as_bytes()).to_string(), + ); + } + + environment_variables.insert( + "DOCUMENT_ROOT".to_string(), + wwwroot.to_string_lossy().to_string(), + ); + environment_variables.insert( + "PATH_INFO".to_string(), + match &path_info { + Some(path_info) => format!("/{}", path_info), + None => "".to_string(), + }, + ); + environment_variables.insert( + "PATH_TRANSLATED".to_string(), + match &path_info { + Some(path_info) => { + let mut path_translated = execute_pathbuf.clone(); + path_translated.push(path_info); + path_translated.to_string_lossy().to_string() + } + None => "".to_string(), + }, + ); + environment_variables.insert( + "REQUEST_METHOD".to_string(), + hyper_request.method().to_string(), + ); + environment_variables.insert("GATEWAY_INTERFACE".to_string(), "CGI/1.1".to_string()); + 
environment_variables.insert( + "REQUEST_URI".to_string(), + format!( + "{}{}", + original_request_uri.path(), + match original_request_uri.query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ), + ); + + environment_variables.insert( + "REMOTE_PORT".to_string(), + socket_data.remote_addr.port().to_string(), + ); + environment_variables.insert( + "REMOTE_ADDR".to_string(), + socket_data.remote_addr.ip().to_canonical().to_string(), + ); + + environment_variables.insert( + "SCRIPT_FILENAME".to_string(), + execute_pathbuf.to_string_lossy().to_string(), + ); + if let Ok(script_path) = execute_pathbuf.as_path().strip_prefix(wwwroot) { + environment_variables.insert( + "SCRIPT_NAME".to_string(), + format!( + "/{}", + match cfg!(windows) { + true => script_path.to_string_lossy().to_string().replace("\\", "/"), + false => script_path.to_string_lossy().to_string(), + } + ), + ); + } + + if socket_data.encrypted { + environment_variables.insert("HTTPS".to_string(), "ON".to_string()); + } + + let mut content_length_set = false; + for (header_name, header_value) in hyper_request.headers().iter() { + let env_header_name = match *header_name { + header::CONTENT_LENGTH => { + content_length_set = true; + "CONTENT_LENGTH".to_string() + } + header::CONTENT_TYPE => "CONTENT_TYPE".to_string(), + _ => { + let mut result = String::new(); + + result.push_str("HTTP_"); + + for c in header_name.as_str().to_uppercase().chars() { + if c.is_alphanumeric() { + result.push(c); + } else { + result.push('_'); + } + } + + result + } + }; + if environment_variables.contains_key(&env_header_name) { + let value = environment_variables.get_mut(&env_header_name); + if let Some(value) = value { + if env_header_name == "HTTP_COOKIE" { + value.push_str("; "); + } else { + // See https://stackoverflow.com/a/1801191 + value.push_str(", "); + } + value.push_str(String::from_utf8_lossy(header_value.as_bytes()).as_ref()); + } else { + environment_variables.insert( + 
env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } else { + environment_variables.insert( + env_header_name, + String::from_utf8_lossy(header_value.as_bytes()).to_string(), + ); + } + } + + if !content_length_set { + environment_variables.insert("CONTENT_LENGTH".to_string(), "0".to_string()); + } + + let (hyper_request, _, _) = request.into_parts(); + + execute_wsgi( + hyper_request, + error_logger, + wsgi_process_pool, + environment_variables, + ) + .await +} + +async fn execute_wsgi( + hyper_request: HyperRequest, + error_logger: &ErrorLogger, + wsgi_process_pool: Arc, + environment_variables: LinkedHashMap, +) -> Result> { + let ipc_mutex = wsgi_process_pool + .obtain_process_with_init_async_ipc() + .await?; + let (_, body) = hyper_request.into_parts(); + let mut body_stream = body.into_data_stream().map_err(std::io::Error::other); + let application_id = { + let (tx, rx) = &mut *ipc_mutex.lock().await; + write_ipc_message_async( + tx, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: Some(environment_variables), + body_chunk: None, + body_error_message: None, + requests_body_chunk: false, + })?, + ) + .await?; + + let application_id; + loop { + let received_message = + postcard::from_bytes::(&read_ipc_message_async(rx).await?)?; + + if let Some(error_message) = received_message.error_message { + Err(anyhow::anyhow!(error_message))? 
+ } + + if let Some(application_id_obtained) = received_message.application_id { + application_id = application_id_obtained; + break; + } + + if let Some(error_log_line) = received_message.error_log_line { + error_logger.log(&error_log_line).await; + } else if received_message.requests_body_chunk { + let body_chunk; + let body_error_message; + match body_stream.next().await { + None => { + body_chunk = None; + body_error_message = None; + } + Some(Err(err)) => { + body_chunk = None; + body_error_message = Some(err.to_string()); + } + Some(Ok(chunk)) => { + body_chunk = Some(chunk.to_vec()); + body_error_message = None; + } + }; + write_ipc_message_async( + tx, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk, + body_error_message, + requests_body_chunk: false, + })?, + ) + .await?; + } + } + + application_id + }; + + let wsgi_head = Arc::new(Mutex::new(ResponseHeadHyper::new())); + let wsgi_head_clone = wsgi_head.clone(); + let error_logger_arc = Arc::new(error_logger.clone()); + let body_stream_mutex = Arc::new(Mutex::new(body_stream)); + let mut response_stream = futures_util::stream::unfold(ipc_mutex, move |ipc_mutex| { + let wsgi_head_clone = wsgi_head_clone.clone(); + let error_logger_arc_clone = error_logger_arc.clone(); + let body_stream_mutex_clone = body_stream_mutex.clone(); + Box::pin(async move { + let ipc_mutex_borrowed = &ipc_mutex; + let chunk_result: Result, Box> = async { + let (tx, rx) = &mut *ipc_mutex_borrowed.lock().await; + write_ipc_message_async( + tx, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: Some(application_id), + environment_variables: None, + body_chunk: None, + body_error_message: None, + requests_body_chunk: true, + })?, + ) + .await?; + + loop { + let received_message = + postcard::from_bytes::(&read_ipc_message_async(rx).await?)?; + + if let Some(error_message) = received_message.error_message { + 
Err(anyhow::anyhow!(error_message))? + } else if let Some(body_chunk) = received_message.body_chunk { + if let Some(status_code) = received_message.status_code { + let mut wsgi_head_locked = wsgi_head_clone.lock().await; + wsgi_head_locked.status = StatusCode::from_u16(status_code)?; + if let Some(headers) = received_message.headers { + let mut header_map = HeaderMap::new(); + for (key, value) in headers { + for value in value { + header_map.append( + HeaderName::from_str(&key)?, + HeaderValue::from_bytes(value.as_bytes())?, + ); + } + } + wsgi_head_locked.headers = Some(header_map); + } + } + return Ok(Some(Bytes::from(body_chunk))); + } else if let Some(error_log_line) = received_message.error_log_line { + error_logger_arc_clone.log(&error_log_line).await; + } else if received_message.requests_body_chunk { + let body_chunk; + let body_error_message; + match body_stream_mutex_clone.lock().await.next().await { + None => { + body_chunk = None; + body_error_message = None; + } + Some(Err(err)) => { + body_chunk = None; + body_error_message = Some(err.to_string()); + } + Some(Ok(chunk)) => { + body_chunk = Some(chunk.to_vec()); + body_error_message = None; + } + }; + write_ipc_message_async( + tx, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk, + body_error_message, + requests_body_chunk: false, + })?, + ) + .await?; + } else { + return Ok(None); + } + } + } + .await; + + match chunk_result { + Err(error) => Some((Err(std::io::Error::other(error.to_string())), ipc_mutex)), + Ok(None) => None, + Ok(Some(chunk)) => Some((Ok(chunk), ipc_mutex)), + } + }) + }); + + let first_chunk = response_stream.next().await; + let response_body = if let Some(Err(first_chunk_error)) = first_chunk { + Err(first_chunk_error)? 
+ } else if let Some(Ok(first_chunk)) = first_chunk { + let response_stream_first_item = futures_util::stream::once(async move { Ok(first_chunk) }); + let response_stream_combined = response_stream_first_item.chain(response_stream); + let stream_body = StreamBody::new(response_stream_combined.map_ok(Frame::data)); + + BodyExt::boxed(stream_body) + } else { + BodyExt::boxed(Empty::new().map_err(|e| match e {})) + }; + + let mut wsgi_head_locked = wsgi_head.lock().await; + let mut hyper_response = Response::new(response_body); + *hyper_response.status_mut() = wsgi_head_locked.status; + if let Some(headers) = wsgi_head_locked.headers.take() { + *hyper_response.headers_mut() = headers; + } + + Ok( + ResponseData::builder_without_request() + .response(hyper_response) + .build(), + ) +} diff --git a/ferron/src/request_handler.rs b/ferron/src/request_handler.rs new file mode 100644 index 0000000000000000000000000000000000000000..08c1968f2bc9e48b2457d791afde594f39f332f4 --- /dev/null +++ b/ferron/src/request_handler.rs @@ -0,0 +1,2211 @@ +use std::convert::Infallible; +use std::net::{IpAddr, SocketAddr}; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use crate::ferron_res::server_software::SERVER_SOFTWARE; +use crate::ferron_util::combine_config::combine_config; +use crate::ferron_util::error_pages::generate_default_error_page; +use crate::ferron_util::url_sanitizer::sanitize_url; + +use crate::ferron_common::{ + ErrorLogger, LogMessage, RequestData, ServerModuleHandlers, SocketData, +}; +use async_channel::Sender; +use chrono::prelude::*; +use futures_util::TryStreamExt; +use http::header::CONTENT_TYPE; +use http_body_util::combinators::BoxBody; +use http_body_util::{BodyExt, Empty, Full, StreamBody}; +use hyper::body::{Body, Bytes, Frame}; +use hyper::header::{self, HeaderName, HeaderValue}; +use hyper::{HeaderMap, Method, Request, Response, StatusCode}; +use hyper_tungstenite::is_upgrade_request; +use rustls_acme::ResolvesServerCertAcme; +use 
tokio::fs;
use tokio::io::BufReader;
use tokio::time::timeout;
use tokio_util::io::ReaderStream;
use yaml_rust2::Yaml;

/// Builds an HTTP error response for the given status code.
///
/// A default error page (including the configured administrator e-mail, if
/// any) is generated first. If the configuration's `errorPages` list has an
/// entry whose `scode` matches `status_code` and whose `path` can be opened,
/// that file is streamed as the response body instead. Any extra `headers`
/// are copied into the response, except `Content-Type` and `Content-Length`,
/// which this function always sets itself.
async fn generate_error_response(
  status_code: StatusCode,
  config: &Yaml,
  headers: &Option<HeaderMap>,
) -> Response<BoxBody<Bytes, std::io::Error>> {
  let bare_body =
    generate_default_error_page(status_code, config["serverAdministratorEmail"].as_str());
  // `metadata.len()` below is u64, so track the length as u64 throughout.
  let mut content_length: Option<u64> = bare_body.len().try_into().ok();
  let mut response_body = Full::new(Bytes::from(bare_body))
    .map_err(|e| match e {})
    .boxed();

  if let Some(error_pages) = config["errorPages"].as_vec() {
    for error_page_yaml in error_pages {
      if let Some(page_status_code) = error_page_yaml["scode"].as_i64() {
        // Skip entries whose configured code is out of the u16/HTTP range.
        let page_status_code = match StatusCode::from_u16(match page_status_code.try_into() {
          Ok(status_code) => status_code,
          Err(_) => continue,
        }) {
          Ok(status_code) => status_code,
          Err(_) => continue,
        };
        if status_code != page_status_code {
          continue;
        }
        if let Some(page_path) = error_page_yaml["path"].as_str() {
          let file = fs::File::open(page_path).await;

          // A missing or unreadable custom page falls through to the next
          // entry (or the default page) instead of failing the response.
          let file = match file {
            Ok(file) => file,
            Err(_) => continue,
          };

          content_length = match file.metadata().await {
            Ok(metadata) => Some(metadata.len()),
            Err(_) => None,
          };

          // Use BufReader for better performance.
          let reader_stream = ReaderStream::new(BufReader::with_capacity(12800, file));

          let stream_body = StreamBody::new(reader_stream.map_ok(Frame::data));
          let boxed_body = stream_body.boxed();

          response_body = boxed_body;

          break;
        }
      }
    }
  }

  let mut response_builder = Response::builder().status(status_code);

  if let Some(headers) = headers {
    let headers_iter = headers.iter();
    for (name, value) in headers_iter {
      // Content-Type and Content-Length are owned by this function; don't
      // let caller-supplied headers override them.
      if name != header::CONTENT_TYPE && name != header::CONTENT_LENGTH {
        response_builder = response_builder.header(name, value);
      }
    }
  }

  if let Some(content_length) = content_length {
    response_builder = response_builder.header(header::CONTENT_LENGTH, content_length);
  }
  response_builder = response_builder.header(header::CONTENT_TYPE, "text/html");

  response_builder.body(response_body).unwrap_or_default()
}

/// Sends one access-log line in the Combined Log Format
/// (`host - user [time] "request" status bytes "referrer" "user-agent"`)
/// to the logging channel. Missing fields are logged as `-`; referrer and
/// user-agent have backslashes and quotes escaped. Send failures are ignored.
#[allow(clippy::too_many_arguments)]
async fn log_combined(
  logger: &Sender<LogMessage>,
  client_ip: IpAddr,
  auth_user: Option<String>,
  method: String,
  request_path: String,
  protocol: String,
  status_code: u16,
  content_length: Option<u64>,
  referrer: Option<String>,
  user_agent: Option<String>,
) {
  let now: DateTime<Local> = Local::now();
  let formatted_time = now.format("%d/%b/%Y:%H:%M:%S %z").to_string();
  logger
    .send(LogMessage::new(
      format!(
        "{} - {} [{}] \"{} {} {}\" {} {} {} {}",
        client_ip,
        match auth_user {
          Some(auth_user) => auth_user,
          None => String::from("-"),
        },
        formatted_time,
        method,
        request_path,
        protocol,
        status_code,
        match content_length {
          Some(content_length) => format!("{}", content_length),
          None => String::from("-"),
        },
        match referrer {
          Some(referrer) => format!(
            "\"{}\"",
            referrer.replace("\\", "\\\\").replace("\"", "\\\"")
          ),
          None => String::from("-"),
        },
        match user_agent {
          Some(user_agent) => format!(
            "\"{}\"",
            user_agent.replace("\\", "\\\\").replace("\"", "\\\"")
          ),
          None => String::from("-"),
        },
      ),
      false,
    ))
    .await
    .unwrap_or_default();
}
+#[allow(clippy::too_many_arguments)] +async fn request_handler_wrapped( + mut request: Request>, + remote_address: SocketAddr, + local_address: SocketAddr, + encrypted: bool, + config: Arc, + logger: Sender, + handlers_vec: Vec>, + acme_http01_resolver_option: Option>, + http3_alt_port: Option, +) -> Result>, Infallible> { + let is_proxy_request = match request.version() { + hyper::Version::HTTP_2 | hyper::Version::HTTP_3 => { + request.method() == hyper::Method::CONNECT && request.uri().host().is_some() + } + _ => request.uri().host().is_some(), + }; + let is_connect_proxy_request = request.method() == hyper::Method::CONNECT; + + // Collect request data for logging + let log_method = String::from(request.method().as_str()); + let log_request_path = match is_proxy_request { + true => request.uri().to_string(), + false => format!( + "{}{}", + request.uri().path(), + match request.uri().query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + ), + }; + let log_protocol = String::from(match request.version() { + hyper::Version::HTTP_09 => "HTTP/0.9", + hyper::Version::HTTP_10 => "HTTP/1.0", + hyper::Version::HTTP_11 => "HTTP/1.1", + hyper::Version::HTTP_2 => "HTTP/2.0", + hyper::Version::HTTP_3 => "HTTP/3.0", + _ => "HTTP/Unknown", + }); + let log_referrer = match request.headers().get(header::REFERER) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => Some(String::from(header_value)), + Err(_) => None, + }, + None => None, + }; + let log_user_agent = match request.headers().get(header::USER_AGENT) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => Some(String::from(header_value)), + Err(_) => None, + }, + None => None, + }; + let log_enabled = config["global"]["logFilePath"].as_str().is_some(); + let error_log_enabled = config["global"]["errorLogFilePath"].as_str().is_some(); + + // Construct SocketData + let mut socket_data = SocketData::new(remote_address, local_address, encrypted); + 
+ match request.version() { + hyper::Version::HTTP_2 | hyper::Version::HTTP_3 => { + // Set "Host" request header for HTTP/2 and HTTP/3 connections + if let Some(authority) = request.uri().authority() { + let authority = authority.to_owned(); + let headers = request.headers_mut(); + if !headers.contains_key(header::HOST) { + if let Ok(authority_value) = HeaderValue::from_bytes(authority.as_str().as_bytes()) { + headers.append(header::HOST, authority_value); + } + } + } + + // Normalize the Cookie header for HTTP/2 and HTTP/3 + let mut cookie_normalized = String::new(); + let mut cookie_set = false; + let headers = request.headers_mut(); + for cookie in headers.get_all(header::COOKIE) { + if let Ok(cookie) = cookie.to_str() { + if cookie_set { + cookie_normalized.push_str("; "); + } + cookie_set = true; + cookie_normalized.push_str(cookie); + } + } + if cookie_set { + if let Ok(cookie_value) = HeaderValue::from_bytes(cookie_normalized.as_bytes()) { + headers.insert(header::COOKIE, cookie_value); + } + } + } + _ => (), + } + + let host_header_option = request.headers().get(header::HOST); + if let Some(header_data) = host_header_option { + match header_data.to_str() { + Ok(host_header) => { + let host_header_lower_case = host_header.to_lowercase(); + if host_header_lower_case != *host_header { + let host_header_value = match HeaderValue::from_str(&host_header_lower_case) { + Ok(host_header_value) => host_header_value, + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("Host header sanitation error: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + let response = Response::builder() + .status(StatusCode::BAD_REQUEST) + .header(header::CONTENT_TYPE, "text/html") + .body( + Full::new(Bytes::from(generate_default_error_page( + StatusCode::BAD_REQUEST, + None, + ))) + .map_err(|e| match e {}) + .boxed(), + ) + .unwrap_or_default(); + + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + 
log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + + request + .headers_mut() + .insert(header::HOST, host_header_value); + } + } + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("Host header sanitation error: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + let response = Response::builder() + .status(StatusCode::BAD_REQUEST) + .header(header::CONTENT_TYPE, "text/html") + .body( + Full::new(Bytes::from(generate_default_error_page( + StatusCode::BAD_REQUEST, + None, + ))) + .map_err(|e| match e {}) + .boxed(), + ) + .unwrap_or_default(); + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match 
response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + } + }; + + // Combine the server configuration + let combined_config = match combine_config( + config, + match is_proxy_request || is_connect_proxy_request { + false => match request.headers().get(header::HOST) { + Some(value) => value.to_str().ok(), + None => None, + }, + true => None, + }, + local_address.ip(), + request.uri().path(), + ) { + Some(config) => config, + None => { + if error_log_enabled { + logger + .send(LogMessage::new( + String::from("Cannot determine server configuration"), + true, + )) + .await + .unwrap_or_default(); + } + let response = Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .header(header::CONTENT_TYPE, "text/html") + .body( + Full::new(Bytes::from(generate_default_error_page( + StatusCode::INTERNAL_SERVER_ERROR, + None, + ))) + .map_err(|e| match e {}) + 
.boxed(), + ) + .unwrap_or_default(); + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + + let url_pathname = request.uri().path(); + let sanitized_url_pathname = match sanitize_url( + url_pathname, + combined_config["allowDoubleSlashes"] + .as_bool() + .unwrap_or_default(), + ) { + Ok(sanitized_url) => sanitized_url, + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("URL sanitation error: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + let response = + generate_error_response(StatusCode::BAD_REQUEST, &combined_config, &None).await; + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + 
log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", url_pathname)) + { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + + if sanitized_url_pathname != url_pathname { + let (mut parts, body) = request.into_parts(); + let mut url_parts = 
parts.uri.into_parts(); + url_parts.path_and_query = Some( + match format!( + "{}{}", + sanitized_url_pathname, + match url_parts.path_and_query { + Some(path_and_query) => { + match path_and_query.query() { + Some(query) => format!("?{}", query), + None => String::from(""), + } + } + None => String::from(""), + } + ) + .parse() + { + Ok(path_and_query) => path_and_query, + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("URL sanitation error: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + let response = + generate_error_response(StatusCode::BAD_REQUEST, &combined_config, &None).await; + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let 
Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }, + ); + parts.uri = match hyper::Uri::from_parts(url_parts) { + Ok(uri) => uri, + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("URL sanitation error: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + let response = + generate_error_response(StatusCode::BAD_REQUEST, &combined_config, &None).await; + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if 
!response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", &sanitized_url_pathname)) + { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + request = Request::from_parts(parts, body); + } + + if request.uri().path() == "*" { + let response = match request.method() { + &Method::OPTIONS => Response::builder() + .status(StatusCode::NO_CONTENT) + .header(header::ALLOW, "GET, POST, HEAD, OPTIONS") + .body(Empty::new().map_err(|e| match e {}).boxed()) + .unwrap_or_default(), + _ => { + let mut header_map = HeaderMap::new(); + if let Ok(header_value) = HeaderValue::from_str("GET, POST, HEAD, OPTIONS") { + header_map.insert(header::ALLOW, header_value); + }; + generate_error_response(StatusCode::BAD_REQUEST, &combined_config, &Some(header_map)).await + } + }; + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => 
Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", &sanitized_url_pathname)) + { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + + // HTTP-01 ACME challenge for automatic TLS + if let Some(acme_http01_resolver) = acme_http01_resolver_option { + if let Some(challenge_token) = request + .uri() + .path() + .strip_prefix("/.well-known/acme-challenge/") + { + if let Some(acme_response) = acme_http01_resolver.get_http_01_key_auth(challenge_token) { + let response = Response::builder() + 
.status(StatusCode::OK) + .header( + CONTENT_TYPE, + HeaderValue::from_static("application/octet-stream"), + ) + .body( + Full::new(Bytes::from(acme_response)) + .map_err(|e| match e {}) + .boxed(), + ) + .unwrap_or_default(); + + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", &sanitized_url_pathname)) + { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + let response = Response::from_parts(response_parts, response_body); + + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => 
Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + return Ok(response); + } + } + }; + + let cloned_logger = logger.clone(); + let error_logger = match error_log_enabled { + true => ErrorLogger::new(cloned_logger), + false => ErrorLogger::without_logger(), + }; + + if is_connect_proxy_request { + let mut connect_proxy_handlers = None; + for mut handlers in handlers_vec { + if handlers.does_connect_proxy_requests() { + connect_proxy_handlers = Some(handlers); + break; + } + } + + if let Some(mut connect_proxy_handlers) = connect_proxy_handlers { + if let Some(connect_address) = request.uri().authority().map(|auth| auth.to_string()) { + // Variables moved to before "tokio::spawn" to avoid issues with moved values + let client_ip = socket_data.remote_addr.ip(); + let custom_headers_yaml = combined_config["customHeaders"].clone(); + + tokio::spawn(async move { + match hyper::upgrade::on(request).await { + Ok(upgraded_request) => { + let result = connect_proxy_handlers + .connect_proxy_request_handler( + upgraded_request, + &connect_address, + &combined_config, + &socket_data, + &error_logger, + ) + .await; + match result { + Ok(_) => (), + Err(err) => { + error_logger + .log(&format!("Unexpected error for CONNECT request: {}", err)) + .await; + } + } + } + Err(err) => { + error_logger + .log(&format!( + "Error while upgrading HTTP CONNECT request: {}", + err + )) + .await + } + } + }); + + let response = Response::builder() + .body(Empty::new().map_err(|e| match e {}).boxed()) + .unwrap_or_default(); + + if log_enabled { + log_combined( + &logger, + client_ip, + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match 
header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = custom_headers_yaml.as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", &sanitized_url_pathname)) + { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + Ok(Response::from_parts(response_parts, response_body)) + } else { + let response = Response::builder() + .status(StatusCode::BAD_REQUEST) + .body(Empty::new().map_err(|e| match e {}).boxed()) + .unwrap_or_default(); + + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match 
response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", &sanitized_url_pathname)) + { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + Ok(Response::from_parts(response_parts, response_body)) + } + } else { + let response = Response::builder() + .status(StatusCode::NOT_IMPLEMENTED) + .body(Empty::new().map_err(|e| match e {}).boxed()) + .unwrap_or_default(); + + if log_enabled { + 
log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", &sanitized_url_pathname)) + { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + Ok(Response::from_parts(response_parts, response_body)) + } + } else { + let is_websocket_request = 
is_upgrade_request(&request); + let mut request_data = RequestData::new(request, None, None); + let mut latest_auth_data = None; + let mut executed_handlers = Vec::new(); + for mut handlers in handlers_vec { + if is_websocket_request && handlers.does_websocket_requests(&combined_config, &socket_data) { + let (request, _, _) = request_data.into_parts(); + + // Variables moved to before "tokio::spawn" to avoid issues with moved values + let client_ip = socket_data.remote_addr.ip(); + let custom_headers_yaml = combined_config["customHeaders"].clone(); + let request_uri = request.uri().to_owned(); + + let (original_response, websocket) = match hyper_tungstenite::upgrade(request, None) { + Ok(data) => data, + Err(err) => { + error_logger + .log(&format!("Error while upgrading WebSocket request: {}", err)) + .await; + let response = Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body( + Full::new(Bytes::from(generate_default_error_page( + StatusCode::INTERNAL_SERVER_ERROR, + None, + ))) + .map_err(|e| match e {}) + .boxed(), + ) + .unwrap_or_default(); + + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { 
+ if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + + tokio::spawn(async move { + let result = handlers + .websocket_request_handler( + websocket, + &request_uri, + &combined_config, + &socket_data, + &error_logger, + ) + .await; + match result { + Ok(_) => (), + Err(err) => { + error_logger + .log(&format!("Unexpected error for WebSocket request: {}", err)) + .await; + } + } + }); + + let response = original_response.map(|body| body.map_err(|err| match err {}).boxed()); + + if log_enabled { + log_combined( + &logger, + client_ip, + None, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => 
response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = custom_headers_yaml.as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", &sanitized_url_pathname)) + { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + + let response_result = match is_proxy_request { + true => { + handlers + .proxy_request_handler(request_data, &combined_config, &socket_data, &error_logger) + .await + } + false => { + handlers + .request_handler(request_data, &combined_config, &socket_data, &error_logger) + .await + } + }; + + executed_handlers.push(handlers); + match response_result { + Ok(response) => { + let ( + request_option, + auth_data, + original_url, + response, + status, + headers, + new_remote_address, + parallel_fn, + ) = 
response.into_parts(); + latest_auth_data = auth_data.clone(); + if let Some(new_remote_address) = new_remote_address { + socket_data.remote_addr = new_remote_address; + }; + if let Some(parallel_fn) = parallel_fn { + // Spawn the function in the web server's Tokio runtime. + // We have implemented parallel_fn parameter in the ResponseData + // because tokio::spawn doesn't work on dynamic libraries, + // see https://github.com/tokio-rs/tokio/issues/6927 + tokio::spawn(parallel_fn); + } + match response { + Some(response) => { + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + let mut response = Response::from_parts(response_parts, response_body); + + while let Some(mut executed_handler) = executed_handlers.pop() 
{ + let response_status = match is_proxy_request { + true => { + executed_handler + .proxy_response_modifying_handler(response) + .await + } + false => executed_handler.response_modifying_handler(response).await, + }; + response = match response_status { + Ok(response) => response, + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("Unexpected error while serving a request: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + + let response = generate_error_response( + StatusCode::INTERNAL_SERVER_ERROR, + &combined_config, + &headers, + ) + .await; + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + auth_data, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match 
response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port) + .as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + } + + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + auth_data, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + + return Ok(response); + } + None => match status { + Some(status) => { + let response = generate_error_response(status, &combined_config, &headers).await; + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + 
response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port) + .as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + let mut response = Response::from_parts(response_parts, response_body); + + while let Some(mut executed_handler) = executed_handlers.pop() { + let response_status = match is_proxy_request { + true => { + executed_handler + .proxy_response_modifying_handler(response) + .await + } + false => executed_handler.response_modifying_handler(response).await, + }; + response = match response_status { + Ok(response) => response, + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("Unexpected error while serving a request: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + + let response = generate_error_response( + StatusCode::INTERNAL_SERVER_ERROR, + &combined_config, + &headers, + ) + .await; + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + auth_data, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + 
log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() + { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) + { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port) + .as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + } + + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + auth_data, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => 
response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + return Ok(response); + } + None => match request_option { + Some(request) => { + request_data = RequestData::new(request, auth_data, original_url); + continue; + } + None => { + break; + } + }, + }, + } + } + Err(err) => { + let response = + generate_error_response(StatusCode::INTERNAL_SERVER_ERROR, &combined_config, &None) + .await; + + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + let mut response = Response::from_parts(response_parts, response_body); + + while let Some(mut executed_handler) = executed_handlers.pop() { + let response_status = match is_proxy_request { + true => { + executed_handler + 
.proxy_response_modifying_handler(response) + .await + } + false => executed_handler.response_modifying_handler(response).await, + }; + response = match response_status { + Ok(response) => response, + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("Unexpected error while serving a request: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + + let response = generate_error_response( + StatusCode::INTERNAL_SERVER_ERROR, + &combined_config, + &None, + ) + .await; + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + latest_auth_data, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( 
+ "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port) + .as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + } + + if error_log_enabled { + logger + .send(LogMessage::new( + format!("Unexpected error while serving a request: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + latest_auth_data, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + return Ok(response); + } + } + } + + let response = generate_error_response(StatusCode::NOT_FOUND, &combined_config, &None).await; + + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = + HeaderValue::from_str(&header_value.replace("{path}", &sanitized_url_pathname)) + { + if let 
Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + let mut response = Response::from_parts(response_parts, response_body); + + while let Some(mut executed_handler) = executed_handlers.pop() { + let response_status = match is_proxy_request { + true => { + executed_handler + .proxy_response_modifying_handler(response) + .await + } + false => executed_handler.response_modifying_handler(response).await, + }; + response = match response_status { + Ok(response) => response, + Err(err) => { + if error_log_enabled { + logger + .send(LogMessage::new( + format!("Unexpected error while serving a request: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + + let response = + generate_error_response(StatusCode::INTERNAL_SERVER_ERROR, &combined_config, &None) + .await; + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + latest_auth_data, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + None => 
response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + let (mut response_parts, response_body) = response.into_parts(); + if let Some(custom_headers_hash) = combined_config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if !response_parts.headers.contains_key(header_name) { + if let Ok(header_value) = HeaderValue::from_str( + &header_value.replace("{path}", &sanitized_url_pathname), + ) { + if let Ok(header_name) = HeaderName::from_str(header_name) { + response_parts.headers.insert(header_name, header_value); + } + } + } + } + } + } + } + if let Some(http3_alt_port) = http3_alt_port { + if let Ok(header_value) = match response_parts.headers.get(header::ALT_SVC) { + Some(value) => HeaderValue::from_bytes( + format!( + "{}, h3=\":{}\", h3-29=\":{}\"", + String::from_utf8_lossy(value.as_bytes()), + http3_alt_port, + http3_alt_port + ) + .as_bytes(), + ), + None => HeaderValue::from_bytes( + format!("h3=\":{}\", h3-29=\":{}\"", http3_alt_port, http3_alt_port).as_bytes(), + ), + } { + response_parts.headers.insert(header::ALT_SVC, header_value); + } + } + response_parts + .headers + .insert(header::SERVER, HeaderValue::from_static(SERVER_SOFTWARE)); + + return Ok(Response::from_parts(response_parts, response_body)); + } + }; + } + + if log_enabled { + log_combined( + &logger, + socket_data.remote_addr.ip(), + latest_auth_data, + log_method, + log_request_path, + log_protocol, + response.status().as_u16(), + match response.headers().get(header::CONTENT_LENGTH) { + Some(header_value) => match header_value.to_str() { + Ok(header_value) => match header_value.parse::() { + Ok(content_length) => Some(content_length), + Err(_) => response.body().size_hint().exact(), + }, + Err(_) => response.body().size_hint().exact(), + }, + 
None => response.body().size_hint().exact(), + }, + log_referrer, + log_user_agent, + ) + .await; + } + Ok(response) + } +} + +// Top-level HTTP request handler. Delegates to "request_handler_wrapped", +// optionally enforcing the global "timeout" setting (in milliseconds) from the +// server configuration; hitting the deadline yields an error instead of a response. +#[allow(clippy::too_many_arguments)] +pub async fn request_handler( + request: Request>, + remote_address: SocketAddr, + local_address: SocketAddr, + encrypted: bool, + config: Arc, + logger: Sender, + handlers_vec: Vec>, + acme_http01_resolver_option: Option>, + http3_alt_port: Option, +) -> Result>, anyhow::Error> { + // A null "global.timeout" disables the request deadline entirely. + let timeout_yaml = &config["global"]["timeout"]; + if timeout_yaml.is_null() { + request_handler_wrapped( + request, + remote_address, + local_address, + encrypted, + config, + logger, + handlers_vec, + acme_http01_resolver_option, + http3_alt_port, + ) + .await + .map_err(|e| anyhow::anyhow!(e)) + } else { + // Defaults to 300000 ms (5 minutes) when the setting is not an integer. + let timeout_millis = timeout_yaml.as_i64().unwrap_or(300000) as u64; + match timeout( + Duration::from_millis(timeout_millis), + request_handler_wrapped( + request, + remote_address, + local_address, + encrypted, + config, + logger, + handlers_vec, + acme_http01_resolver_option, + http3_alt_port, + ), + ) + .await + { + Ok(response) => response.map_err(|e| anyhow::anyhow!(e)), + Err(_) => Err(anyhow::anyhow!("The client or server has timed out")), + } + } +} diff --git a/ferron/src/res/server_software.rs b/ferron/src/res/server_software.rs new file mode 100644 index 0000000000000000000000000000000000000000..df32c077c3100631115749440304fd071d35f049 --- /dev/null +++ b/ferron/src/res/server_software.rs @@ -0,0 +1 @@ +pub const SERVER_SOFTWARE: &str = "Ferron"; diff --git a/ferron/src/server.rs b/ferron/src/server.rs new file mode 100644 index 0000000000000000000000000000000000000000..97c069b2fea5c83c8578d01da18ce16144b77a47 --- /dev/null +++ b/ferron/src/server.rs @@ -0,0 +1,1639 @@ +use std::error::Error; +use std::net::{IpAddr, Ipv6Addr, SocketAddr}; +use std::sync::Arc; +use std::{env, thread}; + +use crate::ferron_request_handler::request_handler; +use crate::ferron_util::load_tls::{load_certs, load_private_key}; +use 
crate::ferron_util::sni::CustomSniResolver; +use crate::ferron_util::validate_config::{prepare_config_for_validation, validate_config}; + +use crate::ferron_common::{LogMessage, ServerModule, ServerModuleHandlers}; +use async_channel::Sender; +use chrono::prelude::*; +use futures_util::StreamExt; +use h3_quinn::quinn; +use h3_quinn::quinn::crypto::rustls::QuicServerConfig; +use http::Response; +use http_body_util::{BodyExt, StreamBody}; +use hyper::body::{Buf, Bytes, Frame, Incoming}; +use hyper::service::service_fn; +use hyper::Request; +use hyper_util::rt::{TokioExecutor, TokioIo, TokioTimer}; +use ocsp_stapler::Stapler; +use rustls::crypto::ring::cipher_suite::*; +use rustls::crypto::ring::default_provider; +use rustls::crypto::ring::kx_group::*; +use rustls::server::{Acceptor, WebPkiClientVerifier}; +use rustls::sign::CertifiedKey; +use rustls::version::{TLS12, TLS13}; +use rustls::{RootCertStore, ServerConfig}; +use rustls_acme::acme::ACME_TLS_ALPN_NAME; +use rustls_acme::caches::DirCache; +use rustls_acme::{is_tls_alpn_challenge, AcmeConfig, ResolvesServerCertAcme, UseChallenge}; +use rustls_native_certs::load_native_certs; +use tokio::fs; +use tokio::io::{AsyncWriteExt, BufWriter}; +use tokio::net::{TcpListener, TcpStream}; +use tokio::runtime::Handle; +use tokio::sync::Mutex; +use tokio::time; +use tokio_rustls::server::TlsStream; +use tokio_rustls::LazyConfigAcceptor; +use yaml_rust2::Yaml; + +// Enum for maybe TLS stream +enum MaybeTlsStream { + Tls(TlsStream), + Plain(TcpStream), +} + +// Function to accept and handle incoming QUIC connections. +// Each accepted QUIC connection is served as HTTP/3 on a spawned Tokio task, +// and each request stream within it is handled concurrently on its own task. +#[allow(clippy::too_many_arguments)] +async fn accept_quic_connection( + connection_attempt: quinn::Incoming, + local_address: SocketAddr, + config: Arc, + logger: Sender, + modules: Arc>>, +) { + let remote_address = connection_attempt.remote_address(); + + let logger_clone = logger.clone(); + + tokio::task::spawn(async move { + match connection_attempt.await { + Ok(connection) => { + let mut h3_conn: 
h3::server::Connection = + match h3::server::Connection::new(h3_quinn::Connection::new(connection)).await { + Ok(h3_conn) => h3_conn, + Err(err) => { + logger_clone + .send(LogMessage::new( + format!("Error serving HTTP/3 connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + }; + + loop { + match h3_conn.accept().await { + Ok(Some(resolver)) => { + let config = config.clone(); + let remote_address = remote_address; + + let logger_clone = logger_clone.clone(); + let modules = modules.clone(); + tokio::spawn(async move { + let handlers_vec = modules + .iter() + .map(|module| module.get_handlers(Handle::current())); + + let (request, stream) = resolver; + let (mut send, receive) = stream.split(); + // Bridge the HTTP/3 receive stream into a hyper-compatible body stream: + // data frames are yielded until the body ends, then trailers; stream + // errors are surfaced as io::Error frames. + let request_body_stream = futures_util::stream::unfold( + (receive, false), + async move |(mut receive, mut is_body_finished)| loop { + if !is_body_finished { + match receive.recv_data().await { + Ok(Some(mut data)) => { + return Some(( + Ok(Frame::data(data.copy_to_bytes(data.remaining()))), + (receive, false), + )) + } + Ok(None) => is_body_finished = true, + Err(err) => { + return Some(( + Err(std::io::Error::other(err.to_string())), + (receive, false), + )) + } + } + } else { + match receive.recv_trailers().await { + Ok(Some(trailers)) => { + return Some((Ok(Frame::trailers(trailers)), (receive, true))) + } + Ok(None) => is_body_finished = true, + Err(err) => { + return Some(( + Err(std::io::Error::other(err.to_string())), + (receive, true), + )) + } + } + } + }, + ); + let request_body = BodyExt::boxed(StreamBody::new(request_body_stream)); + let (request_parts, _) = request.into_parts(); + let request = Request::from_parts(request_parts, request_body); + let handlers_vec_clone = handlers_vec + .clone() + .collect::>>(); + let response = match request_handler( + request, + remote_address, + local_address, + true, + config, + logger_clone.clone(), + handlers_vec_clone, + None, + None, + ) + .await + { + Ok(response) => response, + Err(err) 
=> { + logger_clone + .send(LogMessage::new( + format!("Error serving HTTP/3 connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + }; + let (response_parts, mut response_body) = response.into_parts(); + if let Err(err) = send + .send_response(Response::from_parts(response_parts, ())) + .await + { + logger_clone + .send(LogMessage::new( + format!("Error serving HTTP/3 connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + // Relay the response body to the client frame-by-frame, tracking whether + // trailers were sent so the stream is only explicitly finished otherwise. + let mut had_trailers = false; + while let Some(chunk) = response_body.frame().await { + match chunk { + Ok(frame) => { + if frame.is_data() { + match frame.into_data() { + Ok(data) => { + if let Err(err) = send.send_data(data).await { + logger_clone + .send(LogMessage::new( + format!("Error serving HTTP/3 connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + } + Err(_) => { + logger_clone + .send(LogMessage::new( + "Error serving HTTP/3 connection: the frame isn't really a data frame".to_string(), + true, + )) + .await + .unwrap_or_default(); + return; + } + } + } else if frame.is_trailers() { + match frame.into_trailers() { + Ok(trailers) => { + had_trailers = true; + if let Err(err) = send.send_trailers(trailers).await { + logger_clone + .send(LogMessage::new( + format!("Error serving HTTP/3 connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + } + Err(_) => { + logger_clone + .send(LogMessage::new( + "Error serving HTTP/3 connection: the frame isn't really a trailers frame".to_string(), + true, + )) + .await + .unwrap_or_default(); + return; + } + } + } + } + Err(err) => { + logger_clone + .send(LogMessage::new( + format!("Error serving HTTP/3 connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + } + } + if !had_trailers { + if let Err(err) = send.finish().await { + logger_clone + .send(LogMessage::new( + format!("Error serving HTTP/3 connection: {}", err), + true, + )) + .await + 
.unwrap_or_default(); + } + } + }); + } + Ok(None) => break, + Err(err) => { + logger_clone + .send(LogMessage::new( + format!("Error serving HTTP/3 connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + } + } + } + Err(err) => { + logger_clone + .send(LogMessage::new( + format!("Cannot accept a connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + } + }); +} + +// Function to accept and handle incoming connections +#[allow(clippy::too_many_arguments)] +async fn accept_connection( + stream: TcpStream, + remote_address: SocketAddr, + tls_config_option: Option<(Arc, Option>)>, + acme_http01_resolver_option: Option>, + config: Arc, + logger: Sender, + modules: Arc>>, + http3_enabled: Option, +) { + // Disable Nagle algorithm to improve performance + if let Err(err) = stream.set_nodelay(true) { + logger + .send(LogMessage::new( + format!("Cannot disable Nagle algorithm: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + }; + + let config = config.clone(); + let local_address = match stream.local_addr() { + Ok(local_address) => local_address, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot obtain local address of the connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + }; + + let logger_clone = logger.clone(); + + tokio::task::spawn(async move { + let maybe_tls_stream = if let Some((tls_config, acme_config_option)) = tls_config_option { + let start_handshake = match LazyConfigAcceptor::new(Acceptor::default(), stream).await { + Ok(start_handshake) => start_handshake, + Err(err) => { + logger + .send(LogMessage::new( + format!("Error during TLS handshake: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + }; + + if let Some(acme_config) = acme_config_option { + if is_tls_alpn_challenge(&start_handshake.client_hello()) { + match start_handshake.into_stream(acme_config).await { + Ok(_) => (), + Err(err) => { + logger + 
.send(LogMessage::new( + format!("Error during TLS handshake: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + }; + return; + } + } + + let tls_stream = match start_handshake.into_stream(tls_config).await { + Ok(tls_stream) => tls_stream, + Err(err) => { + logger + .send(LogMessage::new( + format!("Error during TLS handshake: {}", err), + true, + )) + .await + .unwrap_or_default(); + return; + } + }; + + MaybeTlsStream::Tls(tls_stream) + } else { + MaybeTlsStream::Plain(stream) + }; + + if let MaybeTlsStream::Tls(tls_stream) = maybe_tls_stream { + let alpn_protocol = tls_stream.get_ref().1.alpn_protocol(); + let is_http2; + + if config["global"]["enableHTTP2"].as_bool().unwrap_or(true) { + if alpn_protocol == Some("h2".as_bytes()) { + is_http2 = true; + } else { + // Don't allow HTTP/2 if "h2" ALPN offering was't present + is_http2 = false; + } + } else { + is_http2 = false; + } + + let io = TokioIo::new(tls_stream); + let handlers_vec = modules + .iter() + .map(|module| module.get_handlers(Handle::current())); + + if is_http2 { + let mut http2_builder = hyper::server::conn::http2::Builder::new(TokioExecutor::new()); + http2_builder.timer(TokioTimer::new()); + if let Some(initial_window_size) = + config["global"]["http2Settings"]["initialWindowSize"].as_i64() + { + http2_builder.initial_stream_window_size(initial_window_size as u32); + } + if let Some(max_frame_size) = config["global"]["http2Settings"]["maxFrameSize"].as_i64() { + http2_builder.max_frame_size(max_frame_size as u32); + } + if let Some(max_concurrent_streams) = + config["global"]["http2Settings"]["maxConcurrentStreams"].as_i64() + { + http2_builder.max_concurrent_streams(max_concurrent_streams as u32); + } + if let Some(max_header_list_size) = + config["global"]["http2Settings"]["maxHeaderListSize"].as_i64() + { + http2_builder.max_header_list_size(max_header_list_size as u32); + } + if let Some(enable_connect_protocol) = + 
config["global"]["http2Settings"]["enableConnectProtocol"].as_bool() + { + if enable_connect_protocol { + http2_builder.enable_connect_protocol(); + } + } + + if let Err(err) = http2_builder + .serve_connection( + io, + service_fn(move |request: Request| { + let config = config.clone(); + let logger = logger_clone.clone(); + let handlers_vec_clone = handlers_vec + .clone() + .collect::>>(); + let acme_http01_resolver_option_clone = acme_http01_resolver_option.clone(); + let (request_parts, request_body) = request.into_parts(); + let request = Request::from_parts( + request_parts, + request_body + .map_err(|e| std::io::Error::other(e.to_string())) + .boxed(), + ); + request_handler( + request, + remote_address, + local_address, + true, + config, + logger, + handlers_vec_clone, + acme_http01_resolver_option_clone, + http3_enabled, + ) + }), + ) + .await + { + logger + .send(LogMessage::new( + format!("Error serving HTTPS connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + } else { + let mut http1_builder = hyper::server::conn::http1::Builder::new(); + + // The timer is neccessary for the header timeout to work to mitigate Slowloris. 
+ http1_builder.timer(TokioTimer::new()); + + if let Err(err) = http1_builder + .serve_connection( + io, + service_fn(move |request: Request| { + let config = config.clone(); + let logger = logger_clone.clone(); + let handlers_vec_clone = handlers_vec + .clone() + .collect::>>(); + let acme_http01_resolver_option_clone = acme_http01_resolver_option.clone(); + let (request_parts, request_body) = request.into_parts(); + let request = Request::from_parts( + request_parts, + request_body + .map_err(|e| std::io::Error::other(e.to_string())) + .boxed(), + ); + request_handler( + request, + remote_address, + local_address, + true, + config, + logger, + handlers_vec_clone, + acme_http01_resolver_option_clone, + http3_enabled, + ) + }), + ) + .with_upgrades() + .await + { + logger + .send(LogMessage::new( + format!("Error serving HTTPS connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + } + } else if let MaybeTlsStream::Plain(stream) = maybe_tls_stream { + let io = TokioIo::new(stream); + let handlers_vec = modules + .iter() + .map(|module| module.get_handlers(Handle::current())); + + let mut http1_builder = hyper::server::conn::http1::Builder::new(); + + // The timer is neccessary for the header timeout to work to mitigate Slowloris. 
+ http1_builder.timer(TokioTimer::new()); + + if let Err(err) = http1_builder + .serve_connection( + io, + service_fn(move |request: Request| { + let config = config.clone(); + let logger = logger_clone.clone(); + let handlers_vec_clone = handlers_vec + .clone() + .collect::>>(); + let acme_http01_resolver_option_clone = acme_http01_resolver_option.clone(); + let (request_parts, request_body) = request.into_parts(); + let request = Request::from_parts( + request_parts, + request_body + .map_err(|e| std::io::Error::other(e.to_string())) + .boxed(), + ); + request_handler( + request, + remote_address, + local_address, + false, + config, + logger, + handlers_vec_clone, + acme_http01_resolver_option_clone, + http3_enabled, + ) + }), + ) + .with_upgrades() + .await + { + logger + .send(LogMessage::new( + format!("Error serving HTTP connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + } + }); +} + +// Main server event loop +#[allow(clippy::type_complexity)] +async fn server_event_loop( + yaml_config: Arc, + logger: Sender, + modules: Vec>, + module_error: Option, + modules_optional_builtin: Vec, + first_startup: bool, +) -> Result<(), Box> { + if let Some(module_error) = module_error { + logger + .send(LogMessage::new(module_error.to_string(), true)) + .await + .unwrap_or_default(); + Err(module_error)? + } + + let prepared_config = match prepare_config_for_validation(&yaml_config) { + Ok(prepared_config) => prepared_config, + Err(err) => { + logger + .send(LogMessage::new( + format!("Server configuration validation failed: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Server configuration validation failed: {}", + err + )))? 
+ } + }; + + for (config_to_validate, is_global, is_location) in prepared_config { + match validate_config( + config_to_validate, + is_global, + is_location, + &modules_optional_builtin, + ) { + Ok(unused_properties) => { + for unused_property in unused_properties { + logger + .send(LogMessage::new( + format!( + "Unused configuration property detected: \"{}\". You might load an appropriate module to use this configuration property", + unused_property + ), + true, + )) + .await + .unwrap_or_default(); + } + } + Err(err) => { + logger + .send(LogMessage::new( + format!("Server configuration validation failed: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Server configuration validation failed: {}", + err + )))? + } + }; + } + + let mut crypto_provider = default_provider(); + + if let Some(cipher_suite) = yaml_config["global"]["cipherSuite"].as_vec() { + let mut cipher_suites = Vec::new(); + let cipher_suite_iter = cipher_suite.iter(); + for cipher_suite_yaml in cipher_suite_iter { + if let Some(cipher_suite) = cipher_suite_yaml.as_str() { + let cipher_suite_to_add = match cipher_suite { + "TLS_AES_128_GCM_SHA256" => TLS13_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384" => TLS13_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256" => TLS13_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" => TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" => TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" => { + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + } + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" => TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" => TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" => { + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + } + _ => { + logger + .send(LogMessage::new( + format!("The \"{}\" cipher suite is not supported", cipher_suite), + 
true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "The \"{}\" cipher suite is not supported", + cipher_suite + )))? + } + }; + cipher_suites.push(cipher_suite_to_add); + } + } + crypto_provider.cipher_suites = cipher_suites; + } + + if let Some(ecdh_curves) = yaml_config["global"]["ecdhCurve"].as_vec() { + let mut kx_groups = Vec::new(); + let ecdh_curves_iter = ecdh_curves.iter(); + for ecdh_curve_yaml in ecdh_curves_iter { + if let Some(ecdh_curve) = ecdh_curve_yaml.as_str() { + let kx_group_to_add = match ecdh_curve { + "secp256r1" => SECP256R1, + "secp384r1" => SECP384R1, + "x25519" => X25519, + _ => { + logger + .send(LogMessage::new( + format!("The \"{}\" ECDH curve is not supported", ecdh_curve), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "The \"{}\" ECDH curve is not supported", + ecdh_curve + )))? + } + }; + kx_groups.push(kx_group_to_add); + } + } + crypto_provider.kx_groups = kx_groups; + } + + let crypto_provider_cloned = crypto_provider.clone(); + let mut sni_resolver = CustomSniResolver::new(); + let mut certified_keys = Vec::new(); + + let mut automatic_tls_enabled = false; + let mut acme_letsencrypt_production = true; + let acme_use_http_challenge = yaml_config["global"]["useAutomaticTLSHTTPChallenge"] + .as_bool() + .unwrap_or(false); + let acme_challenge_type = if acme_use_http_challenge { + UseChallenge::Http01 + } else { + UseChallenge::TlsAlpn01 + }; + + // Read automatic TLS configuration + if let Some(read_automatic_tls_enabled) = yaml_config["global"]["enableAutomaticTLS"].as_bool() { + automatic_tls_enabled = read_automatic_tls_enabled; + } + + let acme_contact = yaml_config["global"]["automaticTLSContactEmail"].as_str(); + let acme_cache = yaml_config["global"]["automaticTLSContactCacheDirectory"] + .as_str() + .map(|s| s.to_string()) + .map(DirCache::new); + + if let Some(read_acme_letsencrypt_production) = + yaml_config["global"]["automaticTLSLetsEncryptProduction"].as_bool() 
+ { + acme_letsencrypt_production = read_acme_letsencrypt_production; + } + + if !automatic_tls_enabled { + // Load public certificate and private key + if let Some(cert_path) = yaml_config["global"]["cert"].as_str() { + if let Some(key_path) = yaml_config["global"]["key"].as_str() { + let certs = match load_certs(cert_path) { + Ok(certs) => certs, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot load the \"{}\" TLS certificate: {}", cert_path, err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot load the \"{}\" TLS certificate: {}", + cert_path, err + )))? + } + }; + let key = match load_private_key(key_path) { + Ok(key) => key, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot load the \"{}\" private key: {}", cert_path, err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot load the \"{}\" private key: {}", + cert_path, err + )))? + } + }; + let signing_key = match crypto_provider_cloned.key_provider.load_private_key(key) { + Ok(key) => key, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot load the \"{}\" private key: {}", cert_path, err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot load the \"{}\" private key: {}", + cert_path, err + )))? 
+ } + }; + let certified_key = CertifiedKey::new(certs, signing_key); + sni_resolver.load_fallback_cert_key(Arc::new(certified_key)); + } + } + + if let Some(sni) = yaml_config["global"]["sni"].as_hash() { + let sni_hostnames = sni.keys(); + for sni_hostname_unknown in sni_hostnames { + if let Some(sni_hostname) = sni_hostname_unknown.as_str() { + if let Some(cert_path) = sni[sni_hostname_unknown]["cert"].as_str() { + if let Some(key_path) = sni[sni_hostname_unknown]["key"].as_str() { + let certs = match load_certs(cert_path) { + Ok(certs) => certs, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot load the \"{}\" TLS certificate: {}", cert_path, err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot load the \"{}\" TLS certificate: {}", + cert_path, err + )))? + } + }; + let key = match load_private_key(key_path) { + Ok(key) => key, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot load the \"{}\" private key: {}", cert_path, err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot load the \"{}\" private key: {}", + cert_path, err + )))? + } + }; + let signing_key = match crypto_provider_cloned.key_provider.load_private_key(key) { + Ok(key) => key, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot load the \"{}\" private key: {}", cert_path, err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot load the \"{}\" private key: {}", + cert_path, err + )))? + } + }; + let certified_key_arc = Arc::new(CertifiedKey::new(certs, signing_key)); + sni_resolver.load_host_cert_key(sni_hostname, certified_key_arc.clone()); + certified_keys.push(certified_key_arc); + } + } + } + } + } + } + + // Build TLS configuration + let tls_config_builder_wants_versions = + ServerConfig::builder_with_provider(Arc::new(crypto_provider_cloned)); + + // Very simple minimum and maximum TLS version logic for now... 
+ let min_tls_version_option = yaml_config["global"]["tlsMinVersion"].as_str(); + let max_tls_version_option = yaml_config["global"]["tlsMaxVersion"].as_str(); + let tls_config_builder_wants_verifier = match min_tls_version_option { + Some("TLSv1.3") => match max_tls_version_option { + Some("TLSv1.2") => { + logger + .send(LogMessage::new( + String::from("The maximum TLS version is older than the minimum TLS version"), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(String::from( + "The maximum TLS version is older than the minimum TLS version" + )))? + } + Some("TLSv1.3") | None => { + match tls_config_builder_wants_versions.with_protocol_versions(&[&TLS13]) { + Ok(builder) => builder, + Err(err) => { + logger + .send(LogMessage::new( + format!("Couldn't create the TLS server configuration: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Couldn't create the TLS server configuration: {}", + err + )))? + } + } + } + _ => { + logger + .send(LogMessage::new( + String::from("Invalid maximum TLS version"), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(String::from("Invalid maximum TLS version")))? + } + }, + Some("TLSv1.2") | None => match max_tls_version_option { + Some("TLSv1.2") => { + match tls_config_builder_wants_versions.with_protocol_versions(&[&TLS12]) { + Ok(builder) => builder, + Err(err) => { + logger + .send(LogMessage::new( + format!("Couldn't create the TLS server configuration: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Couldn't create the TLS server configuration: {}", + err + )))? 
+ } + } + } + Some("TLSv1.3") | None => { + match tls_config_builder_wants_versions.with_protocol_versions(&[&TLS12, &TLS13]) { + Ok(builder) => builder, + Err(err) => { + logger + .send(LogMessage::new( + format!("Couldn't create the TLS server configuration: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Couldn't create the TLS server configuration: {}", + err + )))? + } + } + } + _ => { + logger + .send(LogMessage::new( + String::from("Invalid maximum TLS version"), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(String::from("Invalid maximum TLS version")))? + } + }, + _ => { + logger + .send(LogMessage::new( + String::from("Invalid minimum TLS version"), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(String::from("Invalid minimum TLS version")))? + } + }; + + let tls_config_builder_wants_server_cert = + match yaml_config["global"]["useClientCertificate"].as_bool() { + Some(true) => { + let mut roots = RootCertStore::empty(); + let certs_result = load_native_certs(); + if !certs_result.errors.is_empty() { + logger + .send(LogMessage::new( + format!( + "Couldn't load the native certificate store: {}", + certs_result.errors[0] + ), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Couldn't load the native certificate store: {}", + certs_result.errors[0] + )))? + } + let certs = certs_result.certs; + + for cert in certs { + match roots.add(cert) { + Ok(_) => (), + Err(err) => { + logger + .send(LogMessage::new( + format!( + "Couldn't add a certificate to the certificate store: {}", + err + ), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Couldn't add a certificate to the certificate store: {}", + err + )))? + } + } + } + tls_config_builder_wants_verifier + .with_client_cert_verifier(WebPkiClientVerifier::builder(Arc::new(roots)).build()?) 
+ } + _ => tls_config_builder_wants_verifier.with_no_client_auth(), + }; + + let mut tls_config; + + let mut addr = SocketAddr::from((IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 80)); + let mut addr_tls = SocketAddr::from((IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 443)); + let mut tls_enabled = false; + let mut non_tls_disabled = false; + + // Install a process-wide cryptography provider. If it fails, then warn about it. + if crypto_provider.install_default().is_err() && first_startup { + logger + .send(LogMessage::new( + "Cannot install a process-wide cryptography provider".to_string(), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!( + "Cannot install a process-wide cryptography provider" + ))?; + } + + // Read port configurations from YAML + if let Some(read_port) = yaml_config["global"]["port"].as_i64() { + addr = SocketAddr::from(( + IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), + match read_port.try_into() { + Ok(port) => port, + Err(_) => { + logger + .send(LogMessage::new(String::from("Invalid HTTP port"), true)) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!("Invalid HTTP port"))? + } + }, + )); + } else if let Some(read_port) = yaml_config["global"]["port"].as_str() { + addr = match read_port.parse() { + Ok(addr) => addr, + Err(_) => { + logger + .send(LogMessage::new(String::from("Invalid HTTP port"), true)) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!("Invalid HTTP port"))? 
+ } + }; + } + + if let Some(read_tls_enabled) = yaml_config["global"]["secure"].as_bool() { + tls_enabled = read_tls_enabled; + if let Some(read_non_tls_disabled) = + yaml_config["global"]["disableNonEncryptedServer"].as_bool() + { + non_tls_disabled = read_non_tls_disabled; + } + } + + if let Some(read_port) = yaml_config["global"]["sport"].as_i64() { + addr_tls = SocketAddr::from(( + IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), + match read_port.try_into() { + Ok(port) => port, + Err(_) => { + logger + .send(LogMessage::new(String::from("Invalid HTTPS port"), true)) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!("Invalid HTTPS port"))? + } + }, + )); + } else if let Some(read_port) = yaml_config["global"]["sport"].as_str() { + addr_tls = match read_port.parse() { + Ok(addr) => addr, + Err(_) => { + logger + .send(LogMessage::new(String::from("Invalid HTTPS port"), true)) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!("Invalid HTTPS port"))? + } + }; + } + + // Get domains for ACME configuration + let mut acme_domains = Vec::new(); + if let Some(hosts_config) = yaml_config["hosts"].as_vec() { + for host_yaml in hosts_config.iter() { + if let Some(host) = host_yaml.as_hash() { + if let Some(domain_yaml) = host.get(&Yaml::from_str("domain")) { + if let Some(domain) = domain_yaml.as_str() { + if !domain.contains("*") { + acme_domains.push(domain); + } + } + } + } + } + } + + // Create ACME configuration + let mut acme_config = AcmeConfig::new(acme_domains).challenge_type(acme_challenge_type); + if let Some(acme_contact_unwrapped) = acme_contact { + acme_config = acme_config.contact_push(format!("mailto:{}", acme_contact_unwrapped)); + } + let mut acme_config_with_cache = acme_config.cache_option(acme_cache); + acme_config_with_cache = + acme_config_with_cache.directory_lets_encrypt(acme_letsencrypt_production); + + let (acme_config, acme_http01_resolver) = if tls_enabled && automatic_tls_enabled { + let mut acme_state = 
acme_config_with_cache.state(); + + let acme_resolver = acme_state.resolver(); + + // Create TLS configuration + tls_config = if yaml_config["global"]["enableOCSPStapling"] + .as_bool() + .unwrap_or(true) + { + tls_config_builder_wants_server_cert + .with_cert_resolver(Arc::new(Stapler::new(acme_resolver.clone()))) + } else { + tls_config_builder_wants_server_cert.with_cert_resolver(acme_resolver.clone()) + }; + + let acme_logger = logger.clone(); + tokio::spawn(async move { + while let Some(acme_result) = acme_state.next().await { + if let Err(acme_error) = acme_result { + acme_logger + .send(LogMessage::new( + format!("Error while obtaining a TLS certificate: {}", acme_error), + true, + )) + .await + .unwrap_or_default(); + } + } + }); + + if acme_use_http_challenge { + (None, Some(acme_resolver)) + } else { + let mut acme_config = tls_config.clone(); + acme_config.alpn_protocols.push(ACME_TLS_ALPN_NAME.to_vec()); + + (Some(acme_config), None) + } + } else { + // Create TLS configuration + tls_config = if yaml_config["global"]["enableOCSPStapling"] + .as_bool() + .unwrap_or(true) + { + let ocsp_stapler_arc = Arc::new(Stapler::new(Arc::new(sni_resolver))); + for certified_key in certified_keys.iter() { + ocsp_stapler_arc.preload(certified_key.clone()); + } + tls_config_builder_wants_server_cert.with_cert_resolver(ocsp_stapler_arc.clone()) + } else { + tls_config_builder_wants_server_cert.with_cert_resolver(Arc::new(sni_resolver)) + }; + + // Drop the ACME configuration + drop(acme_config_with_cache); + (None, None) + }; + + let quic_config = if tls_enabled + && yaml_config["global"]["enableHTTP3"] + .as_bool() + .unwrap_or(false) + { + let mut quic_tls_config = tls_config.clone(); + quic_tls_config.max_early_data_size = u32::MAX; + quic_tls_config.alpn_protocols = vec![b"h3".to_vec(), b"h3-29".to_vec()]; + let quic_config = quinn::ServerConfig::with_crypto(Arc::new(match QuicServerConfig::try_from( + quic_tls_config, + ) { + Ok(quinn_config) => quinn_config, + 
Err(err) => { + logger + .send(LogMessage::new( + format!("There was a problem when starting HTTP/3 server: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "There was a problem when starting HTTP/3 server: {}", + err + )))? + } + })); + Some(quic_config) + } else { + None + }; + + // Configure ALPN protocols + let mut alpn_protocols = vec![b"http/1.1".to_vec(), b"http/1.0".to_vec()]; + if yaml_config["global"]["enableHTTP2"] + .as_bool() + .unwrap_or(true) + { + alpn_protocols.insert(0, b"h2".to_vec()); + } + tls_config.alpn_protocols = alpn_protocols; + let tls_config_arc = Arc::new(tls_config); + let acme_config_arc = acme_config.map(Arc::new); + + let mut listener = None; + let mut listener_tls = None; + let mut listener_quic = None; + + // Bind to the specified ports + if !non_tls_disabled { + println!("HTTP server is listening at {}", addr); + listener = Some(match TcpListener::bind(addr).await { + Ok(listener) => listener, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot listen to HTTP port: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot listen to HTTP port: {}", + err + )))? + } + }); + } + + if tls_enabled { + println!("HTTPS server is listening at {}", addr_tls); + listener_tls = Some(match TcpListener::bind(addr_tls).await { + Ok(listener) => listener, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot listen to HTTPS port: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot listen to HTTPS port: {}", + err + )))? 
+ } + }); + + if let Some(quic_config) = quic_config { + println!("HTTP/3 server is listening at {}", addr_tls); + listener_quic = Some(match quinn::Endpoint::server(quic_config, addr_tls) { + Ok(listener) => listener, + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot listen to HTTP/3 port: {}", err), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!(format!( + "Cannot listen to HTTP/3 port: {}", + err + )))? + } + }); + } + } + + // Wrap the modules vector in an Arc + let modules_arc = Arc::new(modules); + + let http3_enabled = if listener_tls.is_some() { + Some(addr_tls.port()) + } else { + None + }; + + // Main loop to accept incoming connections + loop { + let listener_borrowed = &listener; + let listener_accept = async move { + if let Some(listener) = listener_borrowed { + listener.accept().await + } else { + futures_util::future::pending().await + } + }; + + let listener_tls_borrowed = &listener_tls; + let listener_tls_accept = async move { + if let Some(listener_tls) = listener_tls_borrowed { + listener_tls.accept().await + } else { + futures_util::future::pending().await + } + }; + + let listener_quic_borrowed = &listener_quic; + let listener_quic_accept = async move { + if let Some(listener_quic) = listener_quic_borrowed { + listener_quic.accept().await + } else { + futures_util::future::pending().await + } + }; + + if listener_borrowed.is_none() + && listener_tls_borrowed.is_none() + && listener_quic_borrowed.is_none() + { + logger + .send(LogMessage::new( + String::from("No server is listening"), + true, + )) + .await + .unwrap_or_default(); + Err(anyhow::anyhow!("No server is listening"))?; + } + + tokio::select! 
{ + status = listener_accept => { + match status { + Ok((stream, remote_address)) => { + accept_connection( + stream, + remote_address, + None, + acme_http01_resolver.clone(), + yaml_config.clone(), + logger.clone(), + modules_arc.clone(), + None + ) + .await; + } + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot accept a connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + } + }, + status = listener_tls_accept => { + match status { + Ok((stream, remote_address)) => { + accept_connection( + stream, + remote_address, + Some((tls_config_arc.clone(), acme_config_arc.clone())), + None, + yaml_config.clone(), + logger.clone(), + modules_arc.clone(), + http3_enabled + ) + .await; + } + Err(err) => { + logger + .send(LogMessage::new( + format!("Cannot accept a connection: {}", err), + true, + )) + .await + .unwrap_or_default(); + } + } + }, + status = listener_quic_accept => { + match status { + Some(connection_attempt) => { + let local_ip = SocketAddr::new(connection_attempt.local_ip().unwrap_or(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0))), addr_tls.port()); + accept_quic_connection( + connection_attempt, + local_ip, + yaml_config.clone(), + logger.clone(), + modules_arc.clone() + ) + .await; + } + None => { + logger + .send(LogMessage::new( + "HTTP/3 connections can't be accepted anymore".to_string(), + true, + )) + .await + .unwrap_or_default(); + } + } + } + }; + } +} + +// Start the server +#[allow(clippy::type_complexity)] +pub fn start_server( + yaml_config: Arc, + modules: Vec>, + module_error: Option, + modules_optional_builtin: Vec, + first_startup: bool, +) -> Result> { + if let Some(environment_variables_hash) = yaml_config["global"]["environmentVariables"].as_hash() + { + let environment_variables_hash_iter = environment_variables_hash.iter(); + for (variable_name, variable_value) in environment_variables_hash_iter { + if let Some(variable_name) = variable_name.as_str() { + if let Some(variable_value) = 
variable_value.as_str() { + if !variable_name.is_empty() + && !variable_name.contains('\0') + && !variable_name.contains('=') + && !variable_value.contains('\0') + { + // Safety: the environment variables are set before threads are spawned + // The `std::env::set_var` function is safe to use in single-threaded environments + // In Rust 2024 edition, the `std::env::set_var` function would be `unsafe`. + env::set_var(variable_name, variable_value); + } + } + } + } + } + + let available_parallelism = thread::available_parallelism()?.get(); + + // Create Tokio runtime for the server + let server_runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(available_parallelism) + .max_blocking_threads(1536) + .event_interval(25) + .thread_name("server-pool") + .enable_all() + .build()?; + + // Create Tokio runtime for logging + let log_runtime = tokio::runtime::Builder::new_multi_thread() + .worker_threads(match available_parallelism / 2 { + 0 => 1, + non_zero => non_zero, + }) + .max_blocking_threads(768) + .thread_name("log-pool") + .enable_time() + .build()?; + + let (logger, receive_log) = async_channel::bounded::(10000); + + let log_filename = yaml_config["global"]["logFilePath"] + .as_str() + .map(String::from); + let error_log_filename = yaml_config["global"]["errorLogFilePath"] + .as_str() + .map(String::from); + + log_runtime.spawn(async move { + let log_file = match log_filename { + Some(log_filename) => Some( + fs::OpenOptions::new() + .append(true) + .create(true) + .open(log_filename) + .await, + ), + None => None, + }; + + let error_log_file = match error_log_filename { + Some(error_log_filename) => Some( + fs::OpenOptions::new() + .append(true) + .create(true) + .open(error_log_filename) + .await, + ), + None => None, + }; + + let log_file_wrapped = match log_file { + Some(Ok(file)) => Some(Arc::new(Mutex::new(BufWriter::with_capacity(131072, file)))), + Some(Err(e)) => { + eprintln!("Failed to open log file: {}", e); + None + } + None => 
None, + }; + + let error_log_file_wrapped = match error_log_file { + Some(Ok(file)) => Some(Arc::new(Mutex::new(BufWriter::with_capacity(131072, file)))), + Some(Err(e)) => { + eprintln!("Failed to open error log file: {}", e); + None + } + None => None, + }; + + // The logs are written when the log message is received by the log event loop, and flushed every 100 ms, improving the server performance. + let log_file_wrapped_cloned_for_sleep = log_file_wrapped.clone(); + let error_log_file_wrapped_cloned_for_sleep = error_log_file_wrapped.clone(); + tokio::task::spawn(async move { + let mut interval = time::interval(time::Duration::from_millis(100)); + loop { + interval.tick().await; + if let Some(log_file_wrapped_cloned) = log_file_wrapped_cloned_for_sleep.clone() { + let mut locked_file = log_file_wrapped_cloned.lock().await; + locked_file.flush().await.unwrap_or_default(); + } + if let Some(error_log_file_wrapped_cloned) = error_log_file_wrapped_cloned_for_sleep.clone() + { + let mut locked_file = error_log_file_wrapped_cloned.lock().await; + locked_file.flush().await.unwrap_or_default(); + } + } + }); + + // Logging loop + while let Ok(message) = receive_log.recv().await { + let (mut message, is_error) = message.get_message(); + let log_file_wrapped_cloned = if !is_error { + log_file_wrapped.clone() + } else { + error_log_file_wrapped.clone() + }; + + if let Some(log_file_wrapped_cloned) = log_file_wrapped_cloned { + tokio::task::spawn(async move { + let mut locked_file = log_file_wrapped_cloned.lock().await; + if is_error { + let now: DateTime = Local::now(); + let formatted_time = now.format("%Y-%m-%d %H:%M:%S").to_string(); + message = format!("[{}]: {}", formatted_time, message); + } + message.push('\n'); + if let Err(e) = locked_file.write(message.as_bytes()).await { + eprintln!("Failed to write to log file: {}", e); + } + }); + } + } + }); + + // Run the server event loop + let result = server_runtime.block_on(async { + let event_loop_future = 
server_event_loop( + yaml_config, + logger, + modules, + module_error, + modules_optional_builtin, + first_startup, + ); + + #[cfg(unix)] + { + use tokio::signal; + + match signal::unix::signal(signal::unix::SignalKind::hangup()) { + Ok(mut signal) => { + tokio::select! { + result = event_loop_future => { + // Sleep the Tokio runtime to ensure error logs are saved + time::sleep(tokio::time::Duration::from_millis(100)).await; + + result.map(|_| false) + }, + _ = signal.recv() => Ok(true) + } + } + Err(_) => { + let result = event_loop_future.await; + + // Sleep the Tokio runtime to ensure error logs are saved + time::sleep(tokio::time::Duration::from_millis(100)).await; + + result.map(|_| false) + } + } + } + + #[cfg(not(unix))] + { + let result = event_loop_future.await; + + // Sleep the Tokio runtime to ensure error logs are saved + time::sleep(tokio::time::Duration::from_millis(100)).await; + + result.map(|_| false) + } + }); + + // Wait 10 seconds or until all tasks are complete + server_runtime.shutdown_timeout(time::Duration::from_secs(10)); + + result +} diff --git a/ferron/src/util/anti_xss.rs b/ferron/src/util/anti_xss.rs new file mode 100644 index 0000000000000000000000000000000000000000..cafc42d399ad69b13e78c542fa4d750c680225d9 --- /dev/null +++ b/ferron/src/util/anti_xss.rs @@ -0,0 +1,7 @@ +pub fn anti_xss(input: &str) -> String { + input + .replace("&", "&") + .replace("<", "<") + .replace(">", ">") + .replace("\"", """) +} diff --git a/ferron/src/util/asgi_messages.rs b/ferron/src/util/asgi_messages.rs new file mode 100644 index 0000000000000000000000000000000000000000..07cd433a50e73d2f18936a720566e2be4ff05d49 --- /dev/null +++ b/ferron/src/util/asgi_messages.rs @@ -0,0 +1,285 @@ +use std::path::PathBuf; + +use http::{request::Parts, Uri}; +use pyo3::{prelude::*, types::PyDict}; + +use crate::ferron_common::{ErrorLogger, SocketData}; + +pub enum IncomingAsgiMessage { + Init(AsgiInitData), + Message(IncomingAsgiMessageInner), +} + +pub enum 
OutgoingAsgiMessage { + Message(OutgoingAsgiMessageInner), + Finished, + Error(PyErr), +} + +pub enum AsgiInitData { + Lifespan, + Http(AsgiHttpInitData), + Websocket(AsgiWebsocketInitData), +} + +pub struct AsgiHttpInitData { + pub hyper_request_parts: Parts, + pub original_request_uri: Option, + pub socket_data: SocketData, + #[allow(dead_code)] + pub error_logger: ErrorLogger, + pub wwwroot: PathBuf, + pub execute_pathbuf: PathBuf, +} + +pub struct AsgiWebsocketInitData { + pub uri: Uri, + pub socket_data: SocketData, + #[allow(dead_code)] + pub error_logger: ErrorLogger, + pub wwwroot: PathBuf, + pub execute_pathbuf: PathBuf, +} + +pub enum IncomingAsgiMessageInner { + LifespanStartup, + LifespanShutdown, + HttpRequest(AsgiHttpBody), + HttpDisconnect, + WebsocketConnect, + WebsocketReceive(AsgiWebsocketMessage), + WebsocketDisconnect(AsgiWebsocketClose), +} + +pub enum OutgoingAsgiMessageInner { + LifespanStartupComplete, + #[allow(dead_code)] + LifespanStartupFailed(LifespanFailed), + LifespanShutdownComplete, + #[allow(dead_code)] + LifespanShutdownFailed(LifespanFailed), + HttpResponseStart(AsgiHttpResponseStart), + HttpResponseBody(AsgiHttpBody), + HttpResponseTrailers(AsgiHttpTrailers), + #[allow(dead_code)] + WebsocketAccept(AsgiWebsocketAccept), + WebsocketSend(AsgiWebsocketMessage), + #[allow(dead_code)] + WebsocketClose(AsgiWebsocketClose), + Unknown, +} + +#[allow(dead_code)] +pub struct LifespanFailed { + pub message: String, +} + +pub struct AsgiHttpBody { + pub body: Vec, + pub more_body: bool, +} + +pub struct AsgiHttpResponseStart { + pub status: u16, + pub headers: Vec<(Vec, Vec)>, + pub trailers: bool, +} + +pub struct AsgiHttpTrailers { + pub headers: Vec<(Vec, Vec)>, + pub more_trailers: bool, +} + +#[allow(dead_code)] +pub struct AsgiWebsocketAccept { + pub subprotocol: Option, + pub headers: Vec<(Vec, Vec)>, +} + +pub struct AsgiWebsocketClose { + pub code: u16, + pub reason: String, +} + +pub struct AsgiWebsocketMessage { + pub bytes: 
Option>, + pub text: Option, +} + +pub fn asgi_event_to_outgoing_struct( + event: Bound<'_, PyDict>, +) -> PyResult { + let event_type = match event.get_item("type")? { + Some(event_type) => event_type.extract::()?, + None => Err(anyhow::anyhow!("Cannot send event with no type specified"))?, + }; + + match event_type.as_str() { + "lifespan.startup.complete" => Ok(OutgoingAsgiMessageInner::LifespanStartupComplete), + "lifespan.shutdown.complete" => Ok(OutgoingAsgiMessageInner::LifespanShutdownComplete), + "lifespan.startup.failed" => Ok(OutgoingAsgiMessageInner::LifespanStartupFailed( + LifespanFailed { + message: event + .get_item("message")? + .map_or(Ok("".to_string()), |x| x.extract())?, + }, + )), + "lifespan.shutdown.failed" => Ok(OutgoingAsgiMessageInner::LifespanShutdownFailed( + LifespanFailed { + message: event + .get_item("message")? + .map_or(Ok("".to_string()), |x| x.extract())?, + }, + )), + "http.response.start" => Ok(OutgoingAsgiMessageInner::HttpResponseStart( + AsgiHttpResponseStart { + status: match event.get_item("status")?.map(|x| x.extract()) { + Some(status) => status?, + None => Err(anyhow::anyhow!("The HTTP response must have a status code"))?, + }, + headers: event.get_item("headers")?.map_or( + Ok(Ok(Vec::new())), + |header_list_py: Bound<'_, PyAny>| { + header_list_py + .extract::>>>() + .map(|header_list| { + let mut new_header_list = Vec::new(); + for header in header_list { + if header.len() != 2 { + return Err(anyhow::anyhow!("Headers must be two-item iterables")); + } + let mut header_iter = header.into_iter(); + new_header_list.push(( + header_iter.next().unwrap_or(b"".to_vec()), + header_iter.next().unwrap_or(b"".to_vec()), + )); + } + Ok(new_header_list) + }) + }, + )??, + trailers: event + .get_item("trailers")? + .map_or(Ok(false), |x| x.extract())?, + }, + )), + "http.response.body" => Ok(OutgoingAsgiMessageInner::HttpResponseBody(AsgiHttpBody { + body: event + .get_item("body")? 
+ .map_or(Ok(b"".to_vec()), |x| x.extract())?, + more_body: event + .get_item("more_body")? + .map_or(Ok(false), |x| x.extract())?, + })), + "http.response.trailers" => Ok(OutgoingAsgiMessageInner::HttpResponseTrailers( + AsgiHttpTrailers { + headers: event.get_item("headers")?.map_or( + Ok(Ok(Vec::new())), + |header_list_py: Bound<'_, PyAny>| { + header_list_py + .extract::>>>() + .map(|header_list| { + let mut new_header_list = Vec::new(); + for header in header_list { + if header.len() != 2 { + return Err(anyhow::anyhow!("Headers must be two-item iterables")); + } + let mut header_iter = header.into_iter(); + new_header_list.push(( + header_iter.next().unwrap_or(b"".to_vec()), + header_iter.next().unwrap_or(b"".to_vec()), + )); + } + Ok(new_header_list) + }) + }, + )??, + more_trailers: event + .get_item("more_trailers")? + .map_or(Ok(false), |x| x.extract())?, + }, + )), + "websocket.accept" => Ok(OutgoingAsgiMessageInner::WebsocketAccept( + AsgiWebsocketAccept { + subprotocol: event + .get_item("subprotocol")? + .map_or(Ok(None), |x| x.extract())?, + headers: event.get_item("headers")?.map_or( + Ok(Ok(Vec::new())), + |header_list_py: Bound<'_, PyAny>| { + header_list_py + .extract::>>>() + .map(|header_list| { + let mut new_header_list = Vec::new(); + for header in header_list { + if header.len() != 2 { + return Err(anyhow::anyhow!("Headers must be two-item iterables")); + } + let mut header_iter = header.into_iter(); + new_header_list.push(( + header_iter.next().unwrap_or(b"".to_vec()), + header_iter.next().unwrap_or(b"".to_vec()), + )); + } + Ok(new_header_list) + }) + }, + )??, + }, + )), + "websocket.close" => Ok(OutgoingAsgiMessageInner::WebsocketClose( + AsgiWebsocketClose { + code: event.get_item("code")?.map_or(Ok(1000), |x| x.extract())?, + reason: event + .get_item("reason")? + .map_or(Ok(None), |x| x.extract())? 
+ .unwrap_or("".to_string()), + }, + )), + "websocket.send" => Ok(OutgoingAsgiMessageInner::WebsocketSend( + AsgiWebsocketMessage { + bytes: event.get_item("bytes")?.map_or(Ok(None), |x| x.extract())?, + text: event.get_item("text")?.map_or(Ok(None), |x| x.extract())?, + }, + )), + _ => Ok(OutgoingAsgiMessageInner::Unknown), + } +} + +pub fn incoming_struct_to_asgi_event(incoming: IncomingAsgiMessageInner) -> PyResult> { + Python::with_gil(move |py| -> PyResult<_> { + let event = PyDict::new(py); + + match incoming { + IncomingAsgiMessageInner::LifespanStartup => { + event.set_item("type", "lifespan.startup")?; + } + IncomingAsgiMessageInner::LifespanShutdown => { + event.set_item("type", "lifespan.shutdown")?; + } + IncomingAsgiMessageInner::HttpRequest(http_request) => { + event.set_item("type", "lifespan.shutdown")?; + event.set_item("body", http_request.body)?; + event.set_item("more_body", http_request.more_body)?; + } + IncomingAsgiMessageInner::HttpDisconnect => { + event.set_item("type", "http.disconnect")?; + } + IncomingAsgiMessageInner::WebsocketConnect => { + event.set_item("type", "websocket.connect")?; + } + IncomingAsgiMessageInner::WebsocketDisconnect(websocket_close) => { + event.set_item("type", "websocket.disconnect")?; + event.set_item("code", websocket_close.code)?; + event.set_item("reason", websocket_close.reason)?; + } + IncomingAsgiMessageInner::WebsocketReceive(websocket_message) => { + event.set_item("type", "websocket.receive")?; + event.set_item("bytes", websocket_message.bytes)?; + event.set_item("text", websocket_message.text)?; + } + }; + + Ok(event.unbind()) + }) +} diff --git a/ferron/src/util/asgi_structs.rs b/ferron/src/util/asgi_structs.rs new file mode 100644 index 0000000000000000000000000000000000000000..a892b5200783282640be18ef414a02cb7854c347 --- /dev/null +++ b/ferron/src/util/asgi_structs.rs @@ -0,0 +1,52 @@ +pub struct AsgiApplicationWrap { + pub domain: Option, + pub ip: Option, + pub asgi_application_id: Option, + pub 
// NOTE(review): the generic arguments on the `Option` fields were missing
// from the checked-in text; `String`/`usize` are reconstructed from the
// corresponding non-optional fields of `AsgiApplicationLocationWrap`.

/// Host-level ASGI application configuration: which domain/IP the entry
/// applies to, the application to run, and per-location overrides.
pub struct AsgiApplicationWrap {
  pub domain: Option<String>,
  pub ip: Option<String>,
  pub asgi_application_id: Option<usize>,
  pub asgi_application_path: Option<String>,
  pub asgi_path: Option<String>,
  pub locations: Vec<AsgiApplicationLocationWrap>,
}

impl AsgiApplicationWrap {
  /// Creates a new host-level ASGI application configuration entry.
  pub fn new(
    domain: Option<String>,
    ip: Option<String>,
    asgi_application_id: Option<usize>,
    asgi_application_path: Option<String>,
    asgi_path: Option<String>,
    locations: Vec<AsgiApplicationLocationWrap>,
  ) -> Self {
    Self {
      domain,
      ip,
      asgi_application_id,
      asgi_application_path,
      asgi_path,
      locations,
    }
  }
}

/// Location-level ASGI application configuration: a `path` within a host,
/// bound to a specific application id.
pub struct AsgiApplicationLocationWrap {
  pub path: String,
  pub asgi_application_id: usize,
  #[allow(dead_code)]
  pub asgi_application_path: String,
  pub asgi_path: Option<String>,
}

impl AsgiApplicationLocationWrap {
  /// Creates a new location-level ASGI application configuration entry.
  pub fn new(
    path: String,
    asgi_application_id: usize,
    asgi_application_path: String,
    asgi_path: Option<String>,
  ) -> Self {
    Self {
      path,
      asgi_application_id,
      asgi_application_path,
      asgi_path,
    }
  }
}
TwoWaySearcher::new(b"\r\n\r\n"); + let nrnr = TwoWaySearcher::new(b"\n\r\n\r"); + let nn = TwoWaySearcher::new(b"\n\n"); + let rr = TwoWaySearcher::new(b"\r\r"); + let to_parse_length; + + loop { + // Read data from the stream into the temporary buffer + let read_bytes = self.stream.read(&mut temp_buf).await?; + + // If no bytes are read, return an empty response head + if read_bytes == 0 { + self.response_head_length = Some(0); + return Ok(&[0u8; 0]); + } + + // If the response buffer exceeds the capacity, return an empty response head + if self.response_buf.len() + read_bytes > RESPONSE_BUFFER_CAPACITY { + self.response_head_length = Some(0); + return Ok(&[0u8; 0]); + } + + // Determine the starting point for searching the "\r\n\r\n" sequence + let begin_rnrn_or_nrnr_search = self.response_buf.len().saturating_sub(3); + let begin_rr_or_nn_search = self.response_buf.len().saturating_sub(1); + self.response_buf.extend_from_slice(&temp_buf[..read_bytes]); + + // Search for the "\r\n\r\n" sequence in the response buffer + if let Some(rnrn_index) = rnrn.search_in(&self.response_buf[begin_rnrn_or_nrnr_search..]) { + to_parse_length = begin_rnrn_or_nrnr_search + rnrn_index + 4; + break; + } else if let Some(nrnr_index) = + nrnr.search_in(&self.response_buf[begin_rnrn_or_nrnr_search..]) + { + to_parse_length = begin_rnrn_or_nrnr_search + nrnr_index + 4; + break; + } else if let Some(nn_index) = nn.search_in(&self.response_buf[begin_rr_or_nn_search..]) { + to_parse_length = begin_rr_or_nn_search + nn_index + 2; + break; + } else if let Some(rr_index) = rr.search_in(&self.response_buf[begin_rr_or_nn_search..]) { + to_parse_length = begin_rr_or_nn_search + rr_index + 2; + break; + } + } + + // Set the length of the response header + self.response_head_length = Some(to_parse_length); + + // Return the response header as a byte slice + Ok(&self.response_buf[..to_parse_length]) + } +} + +// Implementation of AsyncRead for the CgiResponse struct +impl AsyncRead for CgiResponse 
+where + R: AsyncRead + Unpin, +{ + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + // If the response header length is known and the buffer contains more data than the header length + if let Some(response_head_length) = self.response_head_length { + if self.response_buf.len() > response_head_length { + let remaining_data = &self.response_buf[response_head_length..]; + let to_read = remaining_data.len().min(buf.remaining()); + buf.put_slice(&remaining_data[..to_read]); + self.response_head_length = Some(response_head_length + to_read); + return Poll::Ready(Ok(())); + } + } + + // Create a temporary buffer to hold the data to be consumed + let stream = Pin::new(&mut self.stream); + match stream.poll_read(cx, buf) { + Poll::Ready(Ok(())) => Poll::Ready(Ok(())), + other => other, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio::io::AsyncReadExt; + use tokio_test::io::Builder; + + #[tokio::test] + async fn test_get_head() { + let data = b"Content-Type: text/plain\r\n\r\n"; + let mut stream = Builder::new().read(data).build(); + let mut response = CgiResponse::new(&mut stream); + + let head = response.get_head().await.unwrap(); + assert_eq!(head, b"Content-Type: text/plain\r\n\r\n"); + } + + #[tokio::test] + async fn test_get_head_nn() { + let data = b"Content-Type: text/plain\n\n"; + let mut stream = Builder::new().read(data).build(); + let mut response = CgiResponse::new(&mut stream); + + let head = response.get_head().await.unwrap(); + assert_eq!(head, b"Content-Type: text/plain\n\n"); + } + + #[tokio::test] + async fn test_get_head_large_headers() { + let data = b"Content-Type: text/plain\r\n"; + let large_header = vec![b'A'; RESPONSE_BUFFER_CAPACITY + 10] + .into_iter() + .collect::>(); + let mut stream = Builder::new().read(data).read(&large_header).build(); + let mut response = CgiResponse::new(&mut stream); + + let result = response.get_head().await; + 
assert_eq!(result.unwrap().len(), 0); + + // Consume the remaining data to avoid panicking + let mut remaining_data = vec![0u8; RESPONSE_BUFFER_CAPACITY + 10]; + let _ = response.stream.read(&mut remaining_data).await; + } + + #[tokio::test] + async fn test_get_head_premature_eof() { + let data = b"Content-Type: text/plain\r\n"; + let mut stream = Builder::new().read(data).build(); + let mut response = CgiResponse::new(&mut stream); + + let result = response.get_head().await; + assert_eq!(result.unwrap().len(), 0); + } + + #[tokio::test] + async fn test_poll_read() { + let data = b"Content-Type: text/plain\r\n\r\nHello, world!"; + let mut stream = Builder::new().read(data).build(); + let mut response = CgiResponse::new(&mut stream); + + let head = response.get_head().await.unwrap(); + assert_eq!(head, b"Content-Type: text/plain\r\n\r\n"); + + let mut buf = vec![0u8; 13]; + let n = response.read(&mut buf).await.unwrap(); + assert_eq!(n, 13); + assert_eq!(&buf[..n], b"Hello, world!"); + } +} diff --git a/ferron/src/util/combine_config.rs b/ferron/src/util/combine_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..39e4984607fe6aa09a5267dd771047e9258e1741 --- /dev/null +++ b/ferron/src/util/combine_config.rs @@ -0,0 +1,339 @@ +use std::{net::IpAddr, sync::Arc}; + +use yaml_rust2::{yaml::Hash, Yaml}; + +use crate::ferron_util::{ + ip_match::ip_match, match_hostname::match_hostname, match_location::match_location, +}; + +pub fn combine_config( + config: Arc, + hostname: Option<&str>, + client_ip: IpAddr, + path: &str, +) -> Option { + let global_config = config["global"].as_hash(); + let combined_config = global_config.cloned(); + + if let Some(host_config) = config["hosts"].as_vec() { + for host in host_config { + if let Some(host_hashtable) = host.as_hash() { + let domain_matched = host_hashtable + .get(&Yaml::String("domain".to_string())) + .and_then(Yaml::as_str) + .map(|domain| match_hostname(Some(domain), hostname)) + .unwrap_or(true); + 
+ let ip_matched = host_hashtable + .get(&Yaml::String("ip".to_string())) + .and_then(Yaml::as_str) + .map(|ip| ip_match(ip, client_ip)) + .unwrap_or(true); + + if domain_matched && ip_matched { + return Some(merge_host_configs(combined_config, host_hashtable, path)); + } + } + } + } + + combined_config.map(Yaml::Hash) +} + +fn merge_host_configs(global: Option, host: &Hash, path: &str) -> Yaml { + let mut merged = global.unwrap_or_default(); + let mut locations = None; + + for (key, value) in host { + if let Some(key) = key.as_str() { + if key == "locations" { + if let Some(obtained_locations) = value.as_vec() { + locations = Some(obtained_locations); + } + } else { + match value { + Yaml::Array(host_array) => { + merged + .entry(Yaml::String(key.to_string())) + .and_modify(|global_val| { + if let Yaml::Array(global_array) = global_val { + global_array.extend(host_array.clone()); + } else { + *global_val = Yaml::Array(host_array.clone()); + } + }) + .or_insert_with(|| Yaml::Array(host_array.clone())); + } + Yaml::Hash(host_hash) => { + merged + .entry(Yaml::String(key.to_string())) + .and_modify(|global_val| { + if let Yaml::Hash(global_hash) = global_val { + for (k, v) in host_hash { + global_hash.insert(k.clone(), v.clone()); + } + } else { + *global_val = Yaml::Hash(host_hash.clone()); + } + }) + .or_insert_with(|| Yaml::Hash(host_hash.clone())); + } + _ => { + merged.insert(Yaml::String(key.to_string()), value.clone()); + } + } + } + } + } + + if let Some(locations) = locations { + if let Ok(decoded_path) = urlencoding::decode(path) { + for location in locations { + if let Some(location_hashtable) = location.as_hash() { + let path_matched = location_hashtable + .get(&Yaml::String("path".to_string())) + .and_then(Yaml::as_str) + .map(|path_match| match_location(path_match, &decoded_path)) + .unwrap_or(true); + + if path_matched { + return merge_location_configs(Some(merged), location_hashtable); + } + } + } + } + } + + Yaml::Hash(merged) +} + +fn 
merge_location_configs(global: Option, location: &Hash) -> Yaml { + let mut merged = global.unwrap_or_default(); + + for (key, value) in location { + if let Some(key) = key.as_str() { + match value { + Yaml::Array(host_array) => { + merged + .entry(Yaml::String(key.to_string())) + .and_modify(|global_val| { + if let Yaml::Array(global_array) = global_val { + global_array.extend(host_array.clone()); + } else { + *global_val = Yaml::Array(host_array.clone()); + } + }) + .or_insert_with(|| Yaml::Array(host_array.clone())); + } + Yaml::Hash(host_hash) => { + merged + .entry(Yaml::String(key.to_string())) + .and_modify(|global_val| { + if let Yaml::Hash(global_hash) = global_val { + for (k, v) in host_hash { + global_hash.insert(k.clone(), v.clone()); + } + } else { + *global_val = Yaml::Hash(host_hash.clone()); + } + }) + .or_insert_with(|| Yaml::Hash(host_hash.clone())); + } + _ => { + merged.insert(Yaml::String(key.to_string()), value.clone()); + } + } + } + } + + Yaml::Hash(merged) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::{IpAddr, Ipv4Addr}; + use yaml_rust2::{Yaml, YamlLoader}; + + fn create_test_config() -> Arc { + let yaml_str = r#" + global: + key1: + - global_value1 + key2: + - global_value2 + hosts: + - domain: example.com + ip: 192.168.1.1 + key1: + - host_value1 + key2: + - host_value2 + - domain: test.com + ip: 192.168.1.2 + key3: + - host_value3 + "#; + + let docs = YamlLoader::load_from_str(yaml_str).unwrap(); + Arc::new(docs[0].clone()) + } + + #[test] + fn test_combine_config_with_matching_hostname_and_ip() { + let config = create_test_config(); + let hostname = Some("example.com"); + let client_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)); + + let result = combine_config(config, hostname, client_ip, "/"); + assert!(result.is_some()); + + let result_yaml = result.unwrap(); + let result_hash = result_yaml.as_hash().unwrap(); + + assert_eq!( + result_hash + .get(&Yaml::String("key1".to_string())) + .unwrap() + .as_vec() + 
.unwrap() + .len(), + 2 + ); + assert_eq!( + result_hash + .get(&Yaml::String("key2".to_string())) + .unwrap() + .as_vec() + .unwrap() + .len(), + 2 + ); + } + + #[test] + fn test_combine_config_with_non_matching_hostname() { + let config = create_test_config(); + let hostname = Some("nonexistent.com"); + let client_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)); + + let result = combine_config(config, hostname, client_ip, "/"); + assert!(result + .unwrap() + .as_hash() + .unwrap() + .get(&Yaml::String(String::from("key3"))) + .is_none()); + } + + #[test] + fn test_combine_config_with_non_matching_ip() { + let config = create_test_config(); + let hostname = Some("example.com"); + let client_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)); + + let result = combine_config(config, hostname, client_ip, "/"); + assert!(result + .unwrap() + .as_hash() + .unwrap() + .get(&Yaml::String(String::from("key3"))) + .is_none()); + } + + #[test] + fn test_combine_config_with_global_only() { + let yaml_str = r#" + global: + key1: value1 + key2: + - global_value2 + hosts: [] + "#; + + let docs = YamlLoader::load_from_str(yaml_str).unwrap(); + let config = Arc::new(docs[0].clone()); + let hostname = None; + let client_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)); + + let result = combine_config(config, hostname, client_ip, "/"); + assert!(result.is_some()); + + let result_yaml = result.unwrap(); + let result_hash = result_yaml.as_hash().unwrap(); + + assert_eq!( + result_hash + .get(&Yaml::String("key1".to_string())) + .unwrap() + .as_str() + .unwrap(), + "value1" + ); + assert_eq!( + result_hash + .get(&Yaml::String("key2".to_string())) + .unwrap() + .as_vec() + .unwrap() + .len(), + 1 + ); + } + + #[test] + fn test_combine_config_with_empty_host_config() { + let yaml_str = r#" + global: + key1: value1 + key2: + - global_value2 + hosts: [] + "#; + + let docs = YamlLoader::load_from_str(yaml_str).unwrap(); + let config_yaml = docs[0].clone(); + + let hostname = Some("example.com"); 
+ let client_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)); + + let result = combine_config(Arc::new(config_yaml), hostname, client_ip, "/"); + assert!(result.is_some()); + + let result_yaml = result.unwrap(); + + assert_eq!(result_yaml["key1"].as_str().unwrap(), "value1"); + assert_eq!(result_yaml["key2"].as_vec().unwrap().len(), 1); + } + + #[test] + fn test_combine_config_with_path_match() { + let yaml_str = r#" + global: + key1: + - global_value1 + key2: + - global_value2 + hosts: + - domain: example.com + ip: 192.168.1.1 + locations: + - path: /test + key3: + - location_value + "#; + + let docs = YamlLoader::load_from_str(yaml_str).unwrap(); + let config_yaml = docs[0].clone(); + + let hostname = Some("example.com"); + let client_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)); + + let result = combine_config(Arc::new(config_yaml), hostname, client_ip, "/test"); + assert!(result.is_some()); + + let result_yaml = result.unwrap(); + + assert_eq!(result_yaml["key3"].as_vec().unwrap().len(), 1); + } +} diff --git a/ferron/src/util/copy_move.rs b/ferron/src/util/copy_move.rs new file mode 100644 index 0000000000000000000000000000000000000000..88926f7996a3eb8a9857ded6b68f9ad1bfcdddf9 --- /dev/null +++ b/ferron/src/util/copy_move.rs @@ -0,0 +1,154 @@ +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use futures_util::ready; +use tokio::io::{AsyncRead, AsyncWrite}; + +struct ZeroWriter { + inner: I, +} + +impl Future for ZeroWriter +where + I: AsyncWrite + Unpin, +{ + type Output = Result<(), tokio::io::Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let empty_slice = [0u8; 0]; + ready!(Pin::new(&mut self.inner).poll_write(cx, &empty_slice))?; + ready!(Pin::new(&mut self.inner).poll_flush(cx))?; + Poll::Ready(Ok(())) + } +} + +pub struct Copier { + reader: R, + writer: W, + zero_packet: bool, +} + +impl Copier +where + R: AsyncRead + Unpin, + W: AsyncWrite + Unpin, +{ + pub fn new(reader: R, writer: W) -> Self { 
+ Self { + reader, + writer, + zero_packet: false, + } + } + + pub fn with_zero_packet_writing(reader: R, writer: W) -> Self { + Self { + reader, + writer, + zero_packet: true, + } + } + + pub async fn copy(mut self) -> Result { + let copied_size = tokio::io::copy(&mut self.reader, &mut self.writer).await?; + if self.zero_packet { + let zero_writer = ZeroWriter { inner: self.writer }; + zero_writer.await?; + } + Ok(copied_size) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::pin::Pin; + use tokio::io::{ReadBuf, Result}; + + struct MockReader { + data: Vec, + position: usize, + } + + impl MockReader { + fn new(data: Vec) -> Self { + Self { data, position: 0 } + } + } + + impl AsyncRead for MockReader { + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + let this = self.get_mut(); + let remaining = this.data.len() - this.position; + + if remaining == 0 { + return Poll::Ready(Ok(())); // EOF + } + + let to_read = remaining.min(buf.remaining()); + buf.put_slice(&this.data[this.position..this.position + to_read]); + this.position += to_read; + + Poll::Ready(Ok(())) + } + } + + struct MockWriter { + data: Vec, + } + + impl MockWriter { + fn new() -> Self { + Self { data: Vec::new() } + } + } + + impl AsyncWrite for MockWriter { + fn poll_write(self: Pin<&mut Self>, _cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + let this = self.get_mut(); + this.data.extend_from_slice(buf); + Poll::Ready(Ok(buf.len())) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + } + + #[tokio::test] + async fn test_copy() { + let data = b"Hello, world!".to_vec(); + let reader = MockReader::new(data.clone()); + let writer = MockWriter::new(); + + let copy = Copier::new(reader, writer).copy(); + let result = copy.await; + + assert!(result.is_ok()); + 
assert_eq!(result.unwrap(), data.len() as u64); + } + + #[tokio::test] + async fn test_copy_empty() { + let data = b"".to_vec(); + let reader = MockReader::new(data.clone()); + let writer = MockWriter::new(); + + let copy = Copier::new(reader, writer).copy(); + let result = copy.await; + + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 0); + } +} diff --git a/ferron/src/util/error_pages.rs b/ferron/src/util/error_pages.rs new file mode 100644 index 0000000000000000000000000000000000000000..8b16da773850f4f5a0d0c33cc990f0ce3e4ae566 --- /dev/null +++ b/ferron/src/util/error_pages.rs @@ -0,0 +1,86 @@ +use crate::ferron_util::anti_xss::anti_xss; + +pub fn generate_default_error_page( + status_code: hyper::StatusCode, + server_administrator_email: Option<&str>, +) -> String { + let status_code_name = match status_code.canonical_reason() { + Some(reason) => format!("{} {}", status_code.as_u16(), reason), + None => format!("{}", status_code.as_u16()), + }; + + let error_500 = format!("The server encountered an unexpected error. You may need to contact the server administrator{} to resolve the error.", match server_administrator_email { + Some(email_address) => format!(" at {}", email_address), + None => String::from("") + }); + let status_code_description = String::from(match status_code.as_u16() { + 200 => "The request was successful!", + 201 => "A new resource was successfully created.", + 202 => "The request was accepted but hasn't been fully processed yet.", + 400 => "The request was invalid.", + 401 => "Authentication is required to access the resource.", + 402 => "Payment is required to access the resource.", + 403 => "You're not authorized to access this resource.", + 404 => "The requested resource wasn't found. 
Double-check the URL if entered manually.", + 405 => "The request method is not allowed for this resource.", + 406 => "The server cannot provide a response in an acceptable format.", + 407 => "Proxy authentication is required.", + 408 => "The request took too long and timed out.", + 409 => "There's a conflict with the current state of the server.", + 410 => "The requested resource has been permanently removed.", + 411 => "The request must include a Content-Length header.", + 412 => "The request doesn't meet the server's preconditions.", + 413 => "The request is too large for the server to process.", + 414 => "The requested URL is too long.", + 415 => "The server doesn't support the request's media type.", + 416 => "The requested content range is invalid or unavailable.", + 417 => "The expectation in the Expect header couldn't be met.", + 418 => "This server (a teapot) refuses to make coffee! ☕", + 421 => "The request was directed to the wrong server.", + 422 => "The server couldn't process the provided content.", + 423 => "The requested resource is locked.", + 424 => "The request failed due to a dependency on another failed request.", + 425 => "The server refuses to process a request that might be replayed.", + 426 => "The client must upgrade its protocol to proceed.", + 428 => "A precondition is required for this request, but it wasn't included.", + 429 => "Too many requests were sent in a short period.", + 431 => "The request headers are too large.", + 451 => "Access to this resource is restricted due to legal reasons.", + 497 => "A non-TLS request was sent to an HTTPS server.", + 500 => &error_500, + 501 => "The server doesn't support the requested functionality.", + 502 => "The server, acting as a gateway, received an invalid response.", + 503 => { + "The server is temporarily unavailable (e.g., maintenance or overload). Try again later." 
+ } + 504 => "The server, acting as a gateway, timed out waiting for a response.", + 505 => "The HTTP version used in the request isn't supported.", + 506 => "The Variant header caused a content negotiation loop.", + 507 => "The server lacks sufficient storage to complete the request.", + 508 => "The server detected an infinite loop while processing the request.", + 509 => "Bandwidth limit exceeded on the server.", + 510 => "The server requires an extended HTTP request, but the client didn't send one.", + 511 => "Authentication is required to access the network.", + 598 => "The proxy server didn't receive a response in time.", + 599 => "The proxy server couldn't establish a connection in time.", + _ => "No description found for the status code.", + }); + + format!( + " + + + + + {} + + +

{}

+

{}

+ +", + anti_xss(&status_code_name), + anti_xss(&status_code_name), + anti_xss(&status_code_description) + ) +} diff --git a/ferron/src/util/fcgi_decoder.rs b/ferron/src/util/fcgi_decoder.rs new file mode 100644 index 0000000000000000000000000000000000000000..693eeeb2065a1cd34f316f8718be8966dd331191 --- /dev/null +++ b/ferron/src/util/fcgi_decoder.rs @@ -0,0 +1,233 @@ +use hyper::body::{Buf, Bytes}; +use tokio_util::bytes::BytesMut; +use tokio_util::codec::Decoder; + +#[derive(Debug)] +pub enum FcgiDecodedData { + Stdout(Bytes), + Stderr(Bytes), +} + +enum FcgiDecodeState { + ReadingHead, + ReadingContent, + Finished, +} + +pub struct FcgiDecoder { + header: Vec, + content_length: u16, + padding_length: u8, + state: FcgiDecodeState, +} + +impl FcgiDecoder { + pub fn new() -> Self { + Self { + header: Vec::new(), + content_length: 0, + padding_length: 0, + state: FcgiDecodeState::ReadingHead, + } + } +} + +impl Decoder for FcgiDecoder { + type Error = std::io::Error; + type Item = FcgiDecodedData; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + loop { + match self.state { + FcgiDecodeState::ReadingHead => { + if src.len() >= 8 { + let header = &src[..8]; + self.header = header.to_vec(); + src.advance(8); + self.content_length = u16::from_be_bytes( + self.header[4..6] + .try_into() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?, + ); + self.padding_length = self.header[6]; + self.state = FcgiDecodeState::ReadingContent; + } else { + return Ok(None); + } + } + FcgiDecodeState::ReadingContent => { + if src.len() >= self.content_length as usize + self.padding_length as usize { + let request_id = u16::from_be_bytes( + self.header[2..4] + .try_into() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?, + ); + let record_type = self.header[1]; + if request_id != 1 || (record_type != 3 && record_type != 6 && record_type != 7) { + // Ignore the record for wrong request ID or if the record isn't 
END_REQUEST, STDOUT or STDERR + src.advance(self.content_length as usize + self.padding_length as usize); + return Ok(None); + } + let content_borrowed = &src[..(self.content_length as usize)]; + let content = content_borrowed.to_vec(); + src.advance(self.content_length as usize + self.padding_length as usize); + + match record_type { + 3 => { + // END_REQUEST record + if content.len() > 5 { + let app_status = u32::from_be_bytes( + content[0..4] + .try_into() + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?, + ); + let protocol_status = content[4]; + match protocol_status { + 0 => (), + 1 => return Err(std::io::Error::other("FastCGI server overloaded")), + 2 => { + return Err(std::io::Error::other( + "Role not supported by the FastCGI application", + )) + } + 3 => { + return Err(std::io::Error::other( + "Multiplexed connections not supported by the FastCGI application", + )) + } + _ => return Err(std::io::Error::other("Unknown error")), + } + + self.state = FcgiDecodeState::Finished; + if app_status != 0 { + // Inject data into standard error stream + return Ok(Some(FcgiDecodedData::Stderr(Bytes::from_owner(format!( + "FastCGI application exited with code {}", + app_status + ))))); + } + } else { + // Record malformed, ignoring the record + return Ok(None); + } + } + 6 => { + // STDOUT record + self.state = FcgiDecodeState::ReadingHead; + if content.is_empty() { + return Ok(None); + } + return Ok(Some(FcgiDecodedData::Stdout(Bytes::from_owner(content)))); + } + 7 => { + // STDERR record + self.state = FcgiDecodeState::ReadingHead; + if content.is_empty() { + return Ok(None); + } + return Ok(Some(FcgiDecodedData::Stderr(Bytes::from_owner(content)))); + } + _ => { + // This should be unreachable + unreachable!() + } + }; + } else { + return Ok(None); + } + } + FcgiDecodeState::Finished => { + src.clear(); + return Ok(None); + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use 
crate::ferron_util::fcgi_record::construct_fastcgi_record; + use tokio_util::bytes::BytesMut; + use tokio_util::codec::Decoder; + + #[test] + fn test_fcgi_decoder_stdout() { + let mut decoder = FcgiDecoder::new(); + let mut buf = BytesMut::new(); + + // Construct a STDOUT record + let record_type = 6; + let request_id = 1; + let content = b"Hello, FastCGI!"; + let record = construct_fastcgi_record(record_type, request_id, content); + + buf.extend_from_slice(&record); + + let result = decoder.decode(&mut buf).unwrap(); + assert!(result.is_some()); + if let Some(FcgiDecodedData::Stdout(data)) = result { + assert_eq!(&data[..], content); + } else { + panic!("Expected STDOUT data"); + } + } + + #[test] + fn test_fcgi_decoder_stderr() { + let mut decoder = FcgiDecoder::new(); + let mut buf = BytesMut::new(); + + // Construct a STDERR record + let record_type = 7; + let request_id = 1; + let content = b"Error message"; + let record = construct_fastcgi_record(record_type, request_id, content); + + buf.extend_from_slice(&record); + + let result = decoder.decode(&mut buf).unwrap(); + assert!(result.is_some()); + if let Some(FcgiDecodedData::Stderr(data)) = result { + assert_eq!(&data[..], content); + } else { + panic!("Expected STDERR data"); + } + } + + #[test] + fn test_fcgi_decoder_end_request() { + let mut decoder = FcgiDecoder::new(); + let mut buf = BytesMut::new(); + + // Construct an END_REQUEST record + let record_type = 3; + let request_id = 1; + let mut content = [0u8; 4].to_vec(); // App status + content.push(0); // Protocol status + let record = construct_fastcgi_record(record_type, request_id, &content); + + buf.extend_from_slice(&record); + + let result = decoder.decode(&mut buf).unwrap(); + assert!(result.is_none()); // No data for END_REQUEST + } + + #[test] + fn test_fcgi_decoder_invalid_record() { + let mut decoder = FcgiDecoder::new(); + let mut buf = BytesMut::new(); + + // Construct an invalid record with wrong request ID + let record_type = 6; + let 
request_id = 2; // Invalid request ID + let content = b"Invalid record"; + let record = construct_fastcgi_record(record_type, request_id, content); + + buf.extend_from_slice(&record); + + let result = decoder.decode(&mut buf).unwrap(); + assert!(result.is_none()); // Invalid record should be ignored + } +} diff --git a/ferron/src/util/fcgi_encoder.rs b/ferron/src/util/fcgi_encoder.rs new file mode 100644 index 0000000000000000000000000000000000000000..f6ea79ad5d8962f94f47e2884323d8cefcbe8c0a --- /dev/null +++ b/ferron/src/util/fcgi_encoder.rs @@ -0,0 +1,64 @@ +use tokio_util::bytes::{BufMut, BytesMut}; +use tokio_util::codec::Encoder; + +use crate::ferron_util::fcgi_record::construct_fastcgi_record; + +pub struct FcgiEncoder; + +impl FcgiEncoder { + pub fn new() -> Self { + Self + } +} + +impl Encoder<&[u8]> for FcgiEncoder { + type Error = std::io::Error; + + fn encode(&mut self, item: &[u8], dst: &mut BytesMut) -> Result<(), Self::Error> { + let mut offset = 0; + let mut first_written = false; + while offset < item.len() || (item.is_empty() && !first_written) { + let chunk_size = std::cmp::min(65535, item.len() - offset); + let chunk = &item[offset..offset + chunk_size]; + + // Record type 5 means STDIN + let record = construct_fastcgi_record(5, 1, chunk); + dst.put(record.as_slice()); + + first_written = true; + offset += chunk_size; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio_util::codec::Encoder; + + #[test] + fn test_fcgi_encoder() { + let mut encoder = FcgiEncoder::new(); + let mut dst = BytesMut::new(); + let item = b"Test data"; + + encoder.encode(item, &mut dst).unwrap(); + + // Expected encoded record structure + let expected_record = vec![ + 1, // FCGI_VERSION_1 + 5, // Record type + 0, 1, // Request ID (big-endian) + 0, 9, // Content length (big-endian) + 7, // Padding length + 0, // Reserved + 84, 101, 115, 116, 32, // Content: "Test " + 100, 97, 116, 97, // Content: "data" + 0, 0, 0, 0, 0, 0, 0, // Padding + ]; + + 
/// Builds a FastCGI name-value pair as used in PARAMS records.
///
/// Lengths below 128 are encoded as a single byte; longer lengths use the
/// four-byte form with the high bit of the first byte set, per the FastCGI
/// specification. The name and value bytes follow the two length fields.
pub fn construct_fastcgi_name_value_pair(name: &[u8], value: &[u8]) -> Vec<u8> {
  // Appends a FastCGI variable-length field (1 byte, or 4 bytes with the
  // top bit set when the length is 128 or more).
  fn put_length(buf: &mut Vec<u8>, length: usize) {
    if length < 128 {
      buf.push(length as u8);
    } else {
      buf.extend_from_slice(&((length as u32) | 0x8000_0000).to_be_bytes());
    }
  }

  let mut pair = Vec::with_capacity(8 + name.len() + value.len());
  put_length(&mut pair, name.len());
  put_length(&mut pair, value.len());
  pair.extend_from_slice(name);
  pair.extend_from_slice(value);
  pair
}
/// Builds a complete FastCGI record: an 8-byte header followed by the
/// content and zero-padding to the next 8-byte boundary.
///
/// The header layout is: version (FCGI_VERSION_1), record type, request ID
/// (big-endian u16), content length (big-endian u16), padding length, and a
/// reserved byte.
pub fn construct_fastcgi_record(record_type: u8, request_id: u16, content: &[u8]) -> Vec<u8> {
  let content_length = content.len() as u16;
  // Pad the content up to a multiple of 8 bytes (0 when already aligned).
  let padding_length = ((8 - content_length % 8) % 8) as u8;

  let mut record = Vec::with_capacity(8 + content.len() + padding_length as usize);
  record.push(1); // FCGI_VERSION_1
  record.push(record_type);
  record.extend_from_slice(&request_id.to_be_bytes());
  record.extend_from_slice(&content_length.to_be_bytes());
  record.push(padding_length);
  record.push(0); // Reserved byte
  record.extend_from_slice(content);
  record.resize(record.len() + padding_length as usize, 0);

  record
}
Option, +) -> Result> { + let mut request_path_without_trailing_slashes = request_path; + while request_path_without_trailing_slashes.ends_with("/") { + request_path_without_trailing_slashes = + &request_path_without_trailing_slashes[..(request_path_without_trailing_slashes.len() - 1)]; + } + + // Return path + let mut return_path_vec: Vec<&str> = request_path_without_trailing_slashes.split("/").collect(); + return_path_vec.pop(); + return_path_vec.push(""); + let return_path = &return_path_vec.join("/") as &str; + + let mut table_rows = Vec::new(); + if !request_path_without_trailing_slashes.is_empty() { + table_rows.push(format!( + "Return", + anti_xss(return_path) + )); + } + let min_table_rows_length = table_rows.len(); + + // Create a vector containing entries, then sort them by file name. + let mut entries = Vec::new(); + while let Some(entry) = directory.next_entry().await? { + entries.push(entry); + } + entries.sort_by_cached_key(|entry| entry.file_name().to_string_lossy().to_string()); + + for entry in entries.iter() { + let filename = entry.file_name().to_string_lossy().to_string(); + if filename.starts_with('.') { + // Don't add files nor directories with "." 
at the beginning of their names + continue; + } + match entry.metadata().await { + Ok(metadata) => { + let filename_link = format!( + "{}", + request_path_without_trailing_slashes, + anti_xss(urlencoding::encode(&filename).as_ref()), + match metadata.is_dir() { + true => "/", + false => "", + }, + anti_xss(&filename) + ); + + let row = format!( + "{}{}{}", + filename_link, + match metadata.is_file() { + true => anti_xss(&sizify(metadata.len(), false)), + false => "-".to_string(), + }, + anti_xss( + &(match metadata.modified() { + Ok(mtime) => { + let datetime: DateTime = mtime.into(); + datetime.format("%a %b %d %Y").to_string() + } + Err(_) => "-".to_string(), + }) + ) + ); + table_rows.push(row); + } + Err(_) => { + let filename_link = format!( + "{}", + "{}{}", + request_path_without_trailing_slashes, + anti_xss(urlencoding::encode(&filename).as_ref()), + anti_xss(&filename) + ); + let row = format!("{}--", filename_link); + table_rows.push(row); + } + }; + } + + if table_rows.len() < min_table_rows_length { + table_rows.push("No files found".to_string()); + } + + Ok(format!( + " + + + + + Directory: {} + + +

Directory: {}

+ + + {} + {} +
FilenameSizeDate
+ +", + anti_xss(request_path), + anti_xss(request_path), + table_rows.join(""), + match description { + Some(description) => format!( + "
{}", + anti_xss(&description) + .replace("\r\n", "\n") + .replace("\r", "\n") + .replace("\n", "
") + ), + None => "".to_string(), + } + )) +} diff --git a/ferron/src/util/ip_blocklist.rs b/ferron/src/util/ip_blocklist.rs new file mode 100644 index 0000000000000000000000000000000000000000..1d6fcc25e2f90184da14ce021bfc18a5be30e2fb --- /dev/null +++ b/ferron/src/util/ip_blocklist.rs @@ -0,0 +1,53 @@ +use std::collections::HashSet; +use std::net::{IpAddr, Ipv6Addr}; + +pub struct IpBlockList { + blocked_ips: HashSet, +} + +impl IpBlockList { + // Create a new empty block list + pub fn new() -> Self { + Self { + blocked_ips: HashSet::new(), + } + } + + // Load the block list from a vector of IP address strings + pub fn load_from_vec(&mut self, ip_list: Vec<&str>) { + for ip_str in ip_list { + match ip_str { + "localhost" => { + self + .blocked_ips + .insert(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into()); + } + _ => { + if let Ok(ip) = ip_str.parse::() { + self.blocked_ips.insert(ip.to_canonical()); + } + } + } + } + } + + // Check if an IP address is blocked + pub fn is_blocked(&self, ip: IpAddr) -> bool { + self.blocked_ips.contains(&ip.to_canonical()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ip_block_list() { + let mut block_list = IpBlockList::new(); + block_list.load_from_vec(vec!["192.168.1.1", "10.0.0.1"]); + + assert!(block_list.is_blocked("192.168.1.1".parse().unwrap())); + assert!(block_list.is_blocked("10.0.0.1".parse().unwrap())); + assert!(!block_list.is_blocked("8.8.8.8".parse().unwrap())); + } +} diff --git a/ferron/src/util/ip_match.rs b/ferron/src/util/ip_match.rs new file mode 100644 index 0000000000000000000000000000000000000000..ecd41203fc5d69fb2d17372e3bc8c7f64d4f8f69 --- /dev/null +++ b/ferron/src/util/ip_match.rs @@ -0,0 +1,68 @@ +use std::net::{IpAddr, Ipv6Addr}; + +pub fn ip_match(ip1: &str, ip2: IpAddr) -> bool { + let ip1_processed: IpAddr = match ip1 { + "localhost" => Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into(), + _ => match ip1.parse() { + Ok(ip_processed) => ip_processed, + Err(_) => return false, + }, 
+ }; + + ip1_processed == ip2 +} + +#[cfg(test)] +mod tests { + use super::*; + use std::net::{IpAddr, Ipv6Addr}; + + #[test] + fn test_ip_match_with_valid_ipv6() { + let ip1 = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"; + let ip2 = ip1.parse::().unwrap(); + assert!(ip_match(ip1, ip2)); + } + + #[test] + fn test_ip_match_with_valid_ipv4() { + let ip1 = "192.168.1.1"; + let ip2 = ip1.parse::().unwrap(); + assert!(ip_match(ip1, ip2)); + } + + #[test] + fn test_ip_match_with_localhost() { + let ip1 = "localhost"; + let ip2 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into(); + assert!(ip_match(ip1, ip2)); + } + + #[test] + fn test_ip_match_with_invalid_ip() { + let ip1 = "invalid_ip"; + let ip2 = "192.168.1.1".parse::().unwrap(); + assert!(!ip_match(ip1, ip2)); + } + + #[test] + fn test_ip_match_with_different_ips() { + let ip1 = "192.168.1.1"; + let ip2 = "192.168.1.2".parse::().unwrap(); + assert!(!ip_match(ip1, ip2)); + } + + #[test] + fn test_ip_match_with_empty_string() { + let ip1 = ""; + let ip2 = "192.168.1.1".parse::().unwrap(); + assert!(!ip_match(ip1, ip2)); + } + + #[test] + fn test_ip_match_with_localhost_and_different_ip() { + let ip1 = "localhost"; + let ip2 = "192.168.1.1".parse::().unwrap(); + assert!(!ip_match(ip1, ip2)); + } +} diff --git a/ferron/src/util/load_config.rs b/ferron/src/util/load_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..66f212368a9830d2f15f0b3194e0fe29fbf747c8 --- /dev/null +++ b/ferron/src/util/load_config.rs @@ -0,0 +1,173 @@ +use std::fs; +use std::path::PathBuf; +use std::str::FromStr; +use std::{collections::HashSet, error::Error}; + +use glob::glob; +use yaml_rust2::{Yaml, YamlLoader}; + +pub fn load_config(path: PathBuf) -> Result> { + load_config_inner(path, &mut HashSet::new()) +} + +fn load_config_inner( + path: PathBuf, + loaded_paths: &mut HashSet, +) -> Result> { + // Canonicalize the path + let canonical_pathbuf = fs::canonicalize(&path).unwrap_or_else(|_| path.clone()); + + // Check if the 
path is duplicate. If it's not, add it to loaded paths. + if loaded_paths.contains(&canonical_pathbuf) { + let canonical_path = canonical_pathbuf.to_string_lossy().into_owned(); + + Err(anyhow::anyhow!( + "Detected the server configuration file include loop while attempting to load \"{}\"", + canonical_path + ))? + } else { + loaded_paths.insert(canonical_pathbuf.clone()); + } + + // Read the configuration file + let file_contents = match fs::read_to_string(&path) { + Ok(file) => file, + Err(err) => { + let canonical_path = canonical_pathbuf.to_string_lossy().into_owned(); + + Err(anyhow::anyhow!( + "Failed to read from the server configuration file at \"{}\": {}", + canonical_path, + err + ))? + } + }; + + // Load YAML configuration from the file contents + let yaml_configs = match YamlLoader::load_from_str(&file_contents) { + Ok(yaml_configs) => yaml_configs, + Err(err) => Err(anyhow::anyhow!( + "Failed to parse the server configuration file: {}", + err + ))?, + }; + + // Ensure the YAML file is not empty + if yaml_configs.is_empty() { + Err(anyhow::anyhow!( + "No YAML documents detected in the server configuration file." + ))?; + } + let mut yaml_config = yaml_configs[0].clone(); // Clone the first YAML document + + if yaml_config.is_hash() { + // Get the list of included files + let mut include_files = Vec::new(); + if let Some(include_yaml) = yaml_config["include"].as_vec() { + for include_one_yaml in include_yaml.iter() { + if let Some(include_glob) = include_one_yaml.as_str() { + let include_glob_pathbuf = match PathBuf::from_str(include_glob) { + Ok(pathbuf) => pathbuf, + Err(err) => { + let canonical_path = canonical_pathbuf.to_string_lossy().into_owned(); + + Err(anyhow::anyhow!( + "Failed to determine includes for the server configuration file at \"{}\": {}", + canonical_path, + err + ))? 
+ } + }; + let include_glob_pathbuf_canonicalized = if include_glob_pathbuf.is_absolute() { + include_glob_pathbuf + } else { + let mut canonical_dirname = canonical_pathbuf.clone(); + canonical_dirname.pop(); + canonical_dirname.join(include_glob_pathbuf) + }; + let files_globbed = match glob(&include_glob_pathbuf_canonicalized.to_string_lossy()) { + Ok(files_globbed) => files_globbed, + Err(err) => { + let canonical_path = canonical_pathbuf.to_string_lossy().into_owned(); + + Err(anyhow::anyhow!( + "Failed to determine includes for the server configuration file at \"{}\": {}", + canonical_path, + err + ))? + } + }; + + for file_globbed_result in files_globbed { + let file_globbed = match file_globbed_result { + Ok(file_globbed) => file_globbed, + Err(err) => { + let canonical_path = canonical_pathbuf.to_string_lossy().into_owned(); + + Err(anyhow::anyhow!( + "Failed to determine includes for the server configuration file at \"{}\": {}", + canonical_path, + err + ))? + } + }; + include_files + .push(fs::canonicalize(&file_globbed).unwrap_or_else(|_| file_globbed.clone())); + } + } + } + } + + // Delete included configuration from YAML configuration + if let Some(yaml_config_hash) = yaml_config.as_mut_hash() { + yaml_config_hash.remove(&Yaml::String("include".to_string())); + + // Merge included configuration + for included_file in include_files { + let yaml_to_include = load_config_inner(included_file, loaded_paths)?; + if let Some(yaml_to_include_hashmap) = yaml_to_include.as_hash() { + for (key, value) in yaml_to_include_hashmap.iter() { + if let Some(key) = key.as_str() { + if key != "include" { + match value { + Yaml::Array(host_array) => { + yaml_config_hash + .entry(Yaml::String(key.to_string())) + .and_modify(|global_val| { + if let Yaml::Array(global_array) = global_val { + global_array.extend(host_array.clone()); + } else { + *global_val = Yaml::Array(host_array.clone()); + } + }) + .or_insert_with(|| Yaml::Array(host_array.clone())); + } + 
Yaml::Hash(host_hash) => { + yaml_config_hash + .entry(Yaml::String(key.to_string())) + .and_modify(|global_val| { + if let Yaml::Hash(global_hash) = global_val { + for (k, v) in host_hash { + global_hash.insert(k.clone(), v.clone()); + } + } else { + *global_val = Yaml::Hash(host_hash.clone()); + } + }) + .or_insert_with(|| Yaml::Hash(host_hash.clone())); + } + _ => { + yaml_config_hash.insert(Yaml::String(key.to_string()), value.clone()); + } + } + } + } + } + } + } + } + } + + // Return the server configuration + Ok(yaml_config) +} diff --git a/ferron/src/util/load_tls.rs b/ferron/src/util/load_tls.rs new file mode 100644 index 0000000000000000000000000000000000000000..e684e5b993e72ac2491da3493159868bd51d529c --- /dev/null +++ b/ferron/src/util/load_tls.rs @@ -0,0 +1,24 @@ +use rustls_pki_types::{CertificateDer, PrivateKeyDer}; + +// Load public certificate from file +pub fn load_certs(filename: &str) -> std::io::Result>> { + let certfile = std::fs::File::open(filename) + .map_err(|e| std::io::Error::other(format!("failed to open {}: {}", filename, e)))?; + let mut reader = std::io::BufReader::new(certfile); + rustls_pemfile::certs(&mut reader).collect() +} + +// Load private key from file +pub fn load_private_key(filename: &str) -> std::io::Result> { + let keyfile = std::fs::File::open(filename) + .map_err(|e| std::io::Error::other(format!("failed to open {}: {}", filename, e)))?; + let mut reader = std::io::BufReader::new(keyfile); + match rustls_pemfile::private_key(&mut reader) { + Ok(Some(private_key)) => Ok(private_key), + Ok(None) => Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid private key", + )), + Err(err) => Err(err), + } +} diff --git a/ferron/src/util/match_hostname.rs b/ferron/src/util/match_hostname.rs new file mode 100644 index 0000000000000000000000000000000000000000..74f3f0c9128c35e01f01ae71dd27753e8ea639d6 --- /dev/null +++ b/ferron/src/util/match_hostname.rs @@ -0,0 +1,103 @@ +// Hostname matching function from SVR.JS 
// Hostname matching function from SVR.JS rewritten from JavaScript to Rust
/// Checks whether a request hostname matches a configured hostname pattern.
///
/// A missing pattern or the pattern `"*"` matches anything. A pattern of the
/// form `"*.example.com"` matches `example.com` itself and any subdomain of
/// it; the bare pattern `"*."` matches nothing. Otherwise the hostnames must
/// be exactly equal. A missing request hostname only matches the wildcard
/// patterns above.
pub fn match_hostname(hostname: Option<&str>, req_hostname: Option<&str>) -> bool {
  let pattern = match hostname {
    None | Some("*") => return true,
    Some(pattern) => pattern,
  };

  let req = match req_hostname {
    Some(req) => req,
    None => return false,
  };

  match pattern.strip_prefix("*.") {
    // Wildcard pattern: match the root domain or any of its subdomains.
    Some(root) if !root.is_empty() => req == root || req.ends_with(&format!(".{}", root)),
    // "*." alone, or a literal hostname: require an exact match.
    _ => req == pattern,
  }
}
/// Checks whether `req_path` falls under the configured location `path`.
///
/// Trailing slashes on `path` are ignored and duplicate slashes in both
/// paths are collapsed before comparison; on Windows the comparison is
/// case-insensitive. A request path matches when it equals the location
/// path or is nested anywhere below it.
pub fn match_location(path: &str, req_path: &str) -> bool {
  // Strip trailing slashes from the configured location path.
  let mut path_without_trailing_slashes = path;
  while path_without_trailing_slashes.ends_with("/") {
    path_without_trailing_slashes =
      &path_without_trailing_slashes[..(path_without_trailing_slashes.len() - 1)];
  }

  let mut path_prepared = path_without_trailing_slashes.to_owned();
  let mut req_path_prepared = req_path.to_owned();

  // Collapse duplicate slashes ("//" -> "/") in the location path.
  while path_prepared.contains("//") {
    path_prepared = path_prepared.replace("//", "/");
  }

  // Bug fix: this loop previously assigned `path_prepared.replace("//", "/")`,
  // overwriting the request path with the location path whenever the request
  // path contained "//", which made unrelated paths match.
  while req_path_prepared.contains("//") {
    req_path_prepared = req_path_prepared.replace("//", "/");
  }

  if cfg!(windows) {
    // Windows paths are case-insensitive.
    path_prepared = path_prepared.to_lowercase();
    req_path_prepared = req_path_prepared.to_lowercase();
  }

  path_prepared == req_path_prepared
    || req_path_prepared.starts_with(&format!("{}/", path_prepared))
}
assert!(match_location("/api/v1", "/api/v1")); + } + + #[test] + fn test_trailing_slash() { + assert!(match_location("/home/", "/home")); + assert!(match_location("/home", "/home/")); + assert!(match_location("/api/v1/", "/api/v1")); + } + + #[test] + fn test_subpath_match() { + assert!(match_location("/api", "/api/v1")); + assert!(match_location("/users", "/users/profile")); + } + + #[test] + fn test_non_matching_paths() { + assert!(!match_location("/home", "/dashboard")); + assert!(!match_location("/api", "/user")); + } + + #[test] + fn test_multiple_slashes() { + assert!(match_location("/api//v1", "/api/v1")); + assert!(match_location("//home///", "/home")); + } + + #[test] + fn test_case_insensitivity_on_windows() { + #[cfg(windows)] + { + assert!(match_location("/API", "/api")); + assert!(match_location("/Home", "/home")); + } + } +} diff --git a/ferron/src/util/no_server_verifier.rs b/ferron/src/util/no_server_verifier.rs new file mode 100644 index 0000000000000000000000000000000000000000..6bb5a0ee27c1e6beccb63454fb5fc88e666054d9 --- /dev/null +++ b/ferron/src/util/no_server_verifier.rs @@ -0,0 +1,59 @@ +use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier}; +use rustls::DigitallySignedStruct; +use rustls::SignatureScheme::{self, *}; +use rustls_pki_types::{CertificateDer, ServerName, UnixTime}; + +#[derive(Debug)] +pub struct NoServerVerifier; + +impl NoServerVerifier { + pub fn new() -> Self { + Self + } +} + +impl ServerCertVerifier for NoServerVerifier { + fn verify_server_cert( + &self, + _end_entity: &CertificateDer<'_>, + _intermediates: &[CertificateDer<'_>], + _server_name: &ServerName<'_>, + _ocsp_response: &[u8], + _now: UnixTime, + ) -> Result { + Ok(ServerCertVerified::assertion()) + } + + fn verify_tls12_signature( + &self, + _message: &[u8], + _cert: &CertificateDer<'_>, + _dss: &DigitallySignedStruct, + ) -> Result { + Ok(HandshakeSignatureValid::assertion()) + } + + fn verify_tls13_signature( + &self, + 
_message: &[u8], + _cert: &CertificateDer<'_>, + _dss: &DigitallySignedStruct, + ) -> Result { + Ok(HandshakeSignatureValid::assertion()) + } + + fn supported_verify_schemes(&self) -> Vec { + // Extend the list when necessary + vec![ + ECDSA_NISTP384_SHA384, + ECDSA_NISTP256_SHA256, + ED25519, + RSA_PSS_SHA512, + RSA_PSS_SHA384, + RSA_PSS_SHA256, + RSA_PKCS1_SHA512, + RSA_PKCS1_SHA384, + RSA_PKCS1_SHA256, + ] + } +} diff --git a/ferron/src/util/non_standard_code_structs.rs b/ferron/src/util/non_standard_code_structs.rs new file mode 100644 index 0000000000000000000000000000000000000000..02bf1a27589deace152bb78d6bebcf5bf2775a3b --- /dev/null +++ b/ferron/src/util/non_standard_code_structs.rs @@ -0,0 +1,76 @@ +use crate::ferron_util::ip_blocklist::IpBlockList; +use fancy_regex::Regex; + +#[allow(dead_code)] +pub struct NonStandardCode { + pub status_code: u16, + pub url: Option, + pub regex: Option, + pub location: Option, + pub realm: Option, + pub disable_brute_force_protection: bool, + pub user_list: Option>, + pub users: Option, +} + +impl NonStandardCode { + #[allow(clippy::too_many_arguments)] + pub fn new( + status_code: u16, + url: Option, + regex: Option, + location: Option, + realm: Option, + disable_brute_force_protection: bool, + user_list: Option>, + users: Option, + ) -> Self { + Self { + status_code, + url, + regex, + location, + realm, + disable_brute_force_protection, + user_list, + users, + } + } +} + +pub struct NonStandardCodesWrap { + pub domain: Option, + pub ip: Option, + pub non_standard_codes: Vec, + pub locations: Vec, +} + +impl NonStandardCodesWrap { + pub fn new( + domain: Option, + ip: Option, + non_standard_codes: Vec, + locations: Vec, + ) -> Self { + Self { + domain, + ip, + non_standard_codes, + locations, + } + } +} + +pub struct NonStandardCodesLocationWrap { + pub path: String, + pub non_standard_codes: Vec, +} + +impl NonStandardCodesLocationWrap { + pub fn new(path: String, non_standard_codes: Vec) -> Self { + Self { + path, + 
non_standard_codes, + } + } +} diff --git a/ferron/src/util/preforked_process_pool.rs b/ferron/src/util/preforked_process_pool.rs new file mode 100644 index 0000000000000000000000000000000000000000..98ff1db0883a1a5ba6b2b46939d79298198c52af --- /dev/null +++ b/ferron/src/util/preforked_process_pool.rs @@ -0,0 +1,275 @@ +use std::error::Error; +use std::io::{Read, Write}; +use std::os::fd::OwnedFd; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; + +use interprocess::os::unix::unnamed_pipe::UnnamedPipeExt; +use interprocess::unnamed_pipe::tokio::{Recver as TokioRecver, Sender as TokioSender}; +use interprocess::unnamed_pipe::{Recver, Sender}; +use nix::sys::signal::{SigSet, SigmaskHow}; +use nix::unistd::{ForkResult, Pid}; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::sync::{Mutex, RwLock}; +use tokio_util::bytes::BufMut; + +#[allow(clippy::type_complexity)] +pub struct PreforkedProcessPool { + inner: Vec<( + Arc>, std::io::Error>>>>, + Pid, + Arc>>, + )>, + async_ipc_initialized: Arc>, +} + +impl PreforkedProcessPool { + // This function is `unsafe`, due to forking function in `nix` crate also being `unsafe`. 
+ pub unsafe fn new( + num_processes: usize, + pool_fn: impl Fn(Sender, Recver), + ) -> Result> { + let mut processes = Vec::new(); + for _ in 0..num_processes { + // Create unnamed pipes + let (tx_parent, rx_child) = interprocess::unnamed_pipe::pipe()?; + let (tx_child, rx_parent) = interprocess::unnamed_pipe::pipe()?; + + // Set parent pipes to be non-blocking, because they'll be used in an asynchronous context + tx_parent.set_nonblocking(true).unwrap_or_default(); + rx_parent.set_nonblocking(true).unwrap_or_default(); + + // Obtain the file descriptors of the pipes + let tx_parent_fd: OwnedFd = tx_parent.into(); + let rx_parent_fd: OwnedFd = rx_parent.into(); + let tx_child_fd: OwnedFd = tx_child.into(); + let rx_child_fd: OwnedFd = rx_child.into(); + + match nix::unistd::fork() { + Ok(ForkResult::Parent { child }) => { + processes.push(( + Arc::new(RwLock::new(None)), + child, + Arc::new(Mutex::new(Some((tx_parent_fd, rx_parent_fd)))), + )); + } + Ok(ForkResult::Child) => { + // Block all the signals + nix::sys::signal::sigprocmask(SigmaskHow::SIG_SETMASK, Some(&SigSet::all()), None) + .unwrap_or_default(); + + #[cfg(target_os = "linux")] + { + // Stop child process after the parent process is stopped on Linux systems + nix::sys::prctl::set_pdeathsig(nix::sys::signal::SIGKILL).unwrap_or_default(); + } + + pool_fn(tx_child_fd.into(), rx_child_fd.into()); + + // Exit the process in the process pool + std::process::exit(0); + } + Err(errno) => { + Err(errno)?; + } + } + } + Ok(Self { + inner: processes, + async_ipc_initialized: Arc::new(RwLock::new(AtomicBool::new(false))), + }) + } + + pub async fn init_async_ipc(&self) { + if !self + .async_ipc_initialized + .read() + .await + .load(Ordering::Relaxed) + { + for inner_process in &self.inner { + let fds_option = inner_process.2.lock().await.take(); + if let Some((tx_fd, rx_fd)) = fds_option { + let ipc_io_result = match tx_fd.try_into() { + Ok(tx) => match rx_fd.try_into() { + Ok(rx) => Ok(Arc::new(Mutex::new((tx, 
rx)))), + Err(err) => Err(err), + }, + Err(err) => Err(err), + }; + + inner_process.0.write().await.replace(ipc_io_result); + } + } + + self + .async_ipc_initialized + .write() + .await + .store(true, Ordering::Relaxed); + } + } + + pub async fn obtain_process( + &self, + ) -> Result>, Box> { + if self.inner.is_empty() { + Err(anyhow::anyhow!( + "The process pool doesn't have any processes" + ))? + } else if self.inner.len() == 1 { + let process_option = self.inner[0].0.read().await; + let process = match process_option.as_ref() { + Some(arc_mutex_result) => arc_mutex_result + .as_ref() + .map_err(|e| std::io::Error::new(e.kind(), e.to_string()))?, + None => Err(anyhow::anyhow!("Asynchronous IPC not initialized yet"))?, + }; + Ok(process.clone()) + } else { + let first_random_choice = rand::random_range(0..self.inner.len()); + let second_random_choice_reduced = rand::random_range(0..self.inner.len() - 1); + let second_random_choice = if second_random_choice_reduced < first_random_choice { + second_random_choice_reduced + } else { + second_random_choice_reduced + 1 + }; + let first_random_process_option = self.inner[first_random_choice].0.read().await; + let second_random_process_option = self.inner[second_random_choice].0.read().await; + let first_random_process = match first_random_process_option.as_ref() { + Some(arc_mutex_result) => arc_mutex_result + .as_ref() + .map_err(|e| std::io::Error::new(e.kind(), e.to_string()))?, + None => Err(anyhow::anyhow!("Asynchronous IPC not initialized yet"))?, + }; + let second_random_process = match second_random_process_option.as_ref() { + Some(arc_mutex_result) => arc_mutex_result + .as_ref() + .map_err(|e| std::io::Error::new(e.kind(), e.to_string()))?, + None => Err(anyhow::anyhow!("Asynchronous IPC not initialized yet"))?, + }; + let first_random_process_reference = Arc::strong_count(first_random_process); + let second_random_process_reference = Arc::strong_count(second_random_process); + if first_random_process_reference 
< second_random_process_reference { + Ok(first_random_process.clone()) + } else { + Ok(second_random_process.clone()) + } + } + } + + pub async fn obtain_process_with_init_async_ipc( + &self, + ) -> Result>, Box> { + self.init_async_ipc().await; + self.obtain_process().await + } +} + +impl Drop for PreforkedProcessPool { + fn drop(&mut self) { + for inner_process in &self.inner { + // Kill processes in the process pool when dropping the process pool + nix::sys::signal::kill(inner_process.1, nix::sys::signal::SIGCHLD).unwrap_or_default(); + } + } +} + +pub fn read_ipc_message(rx: &mut Recver) -> Result, std::io::Error> { + let mut message_size_buffer = [0u8; 4]; + rx.read_exact(&mut message_size_buffer)?; + let message_size = u32::from_be_bytes(message_size_buffer); + + let mut buffer = vec![0u8; message_size as usize]; + rx.read_exact(&mut buffer)?; + Ok(buffer) +} + +pub async fn read_ipc_message_async(rx: &mut TokioRecver) -> Result, std::io::Error> { + let mut message_size_buffer = [0u8; 4]; + rx.read_exact(&mut message_size_buffer).await?; + let message_size = u32::from_be_bytes(message_size_buffer); + + let mut buffer = vec![0u8; message_size as usize]; + rx.read_exact(&mut buffer).await?; + Ok(buffer) +} + +pub fn write_ipc_message(tx: &mut Sender, message: &[u8]) -> Result<(), std::io::Error> { + let mut packet = Vec::new(); + packet.put_slice(&(message.len() as u32).to_be_bytes()); + packet.put_slice(message); + tx.write_all(&packet)?; + Ok(()) +} + +pub async fn write_ipc_message_async( + tx: &mut TokioSender, + message: &[u8], +) -> Result<(), std::io::Error> { + let mut packet = Vec::new(); + packet.put_slice(&(message.len() as u32).to_be_bytes()); + packet.put_slice(message); + tx.write_all(&packet).await?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::time::Duration; + use tokio::time::timeout; + + fn dummy_pool_fn(mut tx: Sender, mut rx: Recver) { + // Simulate child doing some work and echoing a message + while let Ok(message) = 
read_ipc_message(&mut rx) { + let _ = write_ipc_message(&mut tx, &message); + } + } + + #[tokio::test] + async fn test_process_pool_creation() { + let pool = unsafe { PreforkedProcessPool::new(2, dummy_pool_fn) }.unwrap(); + assert_eq!(pool.inner.len(), 2); + } + + #[tokio::test] + async fn test_obtain_process_and_communication() { + let pool = unsafe { PreforkedProcessPool::new(1, dummy_pool_fn) }.unwrap(); + let proc = pool.obtain_process_with_init_async_ipc().await.unwrap(); + let mut proc = proc.lock().await; + let (tx, rx) = &mut *proc; + + // Write and read a message + write_ipc_message_async(tx, b"hello").await.unwrap(); + let message = timeout(Duration::from_secs(2), read_ipc_message_async(rx)) + .await + .expect("Timed out reading") + .unwrap(); + + assert_eq!(&message, b"hello"); + } + + #[tokio::test] + async fn test_obtain_process_balancing() { + let pool = unsafe { PreforkedProcessPool::new(3, dummy_pool_fn) }.unwrap(); + + let _p1 = pool.obtain_process_with_init_async_ipc().await.unwrap(); + let _p2 = pool.obtain_process_with_init_async_ipc().await.unwrap(); + let _p3 = pool.obtain_process_with_init_async_ipc().await.unwrap(); + + // This ensures reference counts differ + let chosen = pool.obtain_process().await; + assert!(chosen.is_ok()); + } + + #[tokio::test] + async fn test_obtain_process_empty_pool() { + let empty_pool = PreforkedProcessPool { + inner: Vec::new(), + async_ipc_initialized: Arc::new(RwLock::new(AtomicBool::new(false))), + }; + let result = empty_pool.obtain_process_with_init_async_ipc().await; + assert!(result.is_err()); + } +} diff --git a/ferron/src/util/read_to_end_move.rs b/ferron/src/util/read_to_end_move.rs new file mode 100644 index 0000000000000000000000000000000000000000..4951c5b2eccd87d4b7a9cf05512e39d228788702 --- /dev/null +++ b/ferron/src/util/read_to_end_move.rs @@ -0,0 +1,122 @@ +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{self, AsyncRead, ReadBuf}; + +pub struct 
ReadToEndFuture { + reader: R, + buffer: Vec, +} + +impl ReadToEndFuture { + pub fn new(reader: R) -> Self { + Self { + reader, + buffer: Vec::new(), + } + } +} + +impl Future for ReadToEndFuture +where + R: AsyncRead + Unpin, +{ + type Output = io::Result>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut buf = [0; 1024]; + let mut read_buf = ReadBuf::new(&mut buf); + + loop { + match Pin::new(&mut self.reader).poll_read(cx, &mut read_buf) { + Poll::Ready(Ok(())) => { + let n = read_buf.filled().len(); + if n == 0 { + return Poll::Ready(Ok(self.buffer.clone())); + } + self.buffer.extend_from_slice(read_buf.filled()); + read_buf.clear(); + } + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + Poll::Pending => return Poll::Pending, + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::pin::Pin; + use std::task::{Context, Poll}; + use tokio::io::{self, AsyncRead}; + + struct MockReader { + data: Vec, + position: usize, + } + + impl MockReader { + fn new(data: &[u8]) -> Self { + Self { + data: data.to_vec(), + position: 0, + } + } + } + + impl AsyncRead for MockReader { + fn poll_read( + mut self: Pin<&mut Self>, + _cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + if self.position >= self.data.len() { + return Poll::Ready(Ok(())); + } + + let end = (self.position + buf.remaining()).min(self.data.len()); + buf.put_slice(&self.data[self.position..end]); + self.position = end; + + Poll::Ready(Ok(())) + } + } + + #[tokio::test] + async fn test_read_to_end_empty_reader() { + let reader = MockReader::new(&[]); + let future = ReadToEndFuture::new(reader); + let result = future.await; + assert_eq!(result.unwrap(), Vec::::new()); + } + + #[tokio::test] + async fn test_read_to_end_non_empty_reader() { + let reader = MockReader::new(b"hello world"); + let future = ReadToEndFuture::new(reader); + let result = future.await; + assert_eq!(result.unwrap(), b"hello world"); + } + + struct ErrorReader; + + impl AsyncRead 
for ErrorReader { + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &mut ReadBuf<'_>, + ) -> Poll> { + Poll::Ready(Err(io::Error::other("read error"))) + } + } + + #[tokio::test] + async fn test_read_to_end_error() { + let reader = ErrorReader; + let future = ReadToEndFuture::new(reader); + let result = future.await; + assert!(result.is_err()); + assert_eq!(result.unwrap_err().kind(), io::ErrorKind::Other); + } +} diff --git a/ferron/src/util/sizify.rs b/ferron/src/util/sizify.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d5aac644719e2b3142461c1df65c162a7d27039 --- /dev/null +++ b/ferron/src/util/sizify.rs @@ -0,0 +1,92 @@ +// Sizify function taken from SVR.JS and rewritten from JavaScript to Rust +// SVR.JS is licensed under MIT, so below is the copyright notice: +// +// Copyright (c) 2018-2025 SVR.JS +// Portions of this file are derived from SVR.JS (https://git.svrjs.org/svrjs/svrjs). +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
/// Formats a byte count as a human-readable size string, e.g. `"1K"`,
/// `"1.47K"`, `"1M"`. Ported from SVR.JS (MIT-licensed; see the notice at the
/// top of this file).
///
/// When `add_i` is true, sizes with a binary prefix get an `"i"` suffix
/// (`"1Ki"` instead of `"1K"`); a bare byte count never does.
pub fn sizify(bytes: u64, add_i: bool) -> String {
  if bytes == 0 {
    return "0".to_string();
  }

  const PREFIXES: [&str; 11] = ["", "K", "M", "G", "T", "P", "E", "Z", "Y", "R", "Q"];

  // Largest power of 1024 that keeps the mantissa at or above 1,
  // clamped to the prefixes we can name.
  let exponent = ((bytes as f64).log2() / 10.0)
    .floor()
    .min(PREFIXES.len() as f64 - 1.0) as usize;
  let divisor = 2_i64.pow(10 * exponent as u32);
  let mantissa = bytes as f64 / divisor as f64;

  // Keep roughly three significant digits; precision never goes negative.
  let precision = ((2.0 - mantissa.log10().floor()) as i32).max(0);
  let scale = 10_f64.powi(precision);
  let rounded = (mantissa * scale).ceil() / scale;

  let binary_marker = if exponent > 0 && add_i { "i" } else { "" };
  format!("{}{}{}", rounded, PREFIXES[exponent], binary_marker)
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn test_sizify_zero_bytes() {
    assert_eq!(sizify(0, false), "0");
  }

  #[test]
  fn test_sizify_small_values() {
    assert_eq!(sizify(1000, false), "1000");
    assert_eq!(sizify(1024, false), "1K");
  }

  #[test]
  fn test_sizify_larger_values() {
    assert_eq!(sizify(1048576, false), "1M");
    assert_eq!(sizify(1073741824, false), "1G");
    assert_eq!(sizify(1099511627776, false), "1T");
    assert_eq!(sizify(1125899906842624, false), "1P");
    assert_eq!(sizify(1152921504606846976, false), "1E");
  }

  #[test]
  fn test_sizify_add_i_suffix() {
    assert_eq!(sizify(1024, true), "1Ki");
    assert_eq!(sizify(1048576, true), "1Mi");
    assert_eq!(sizify(1073741824, true), "1Gi");
  }

  #[test]
  fn test_sizify_no_i_suffix() {
    assert_eq!(sizify(1024, false), "1K");
    assert_eq!(sizify(1048576, false), "1M");
    assert_eq!(sizify(1073741824, false), "1G");
  }

  #[test]
  fn test_sizify_decimal_points() {
    assert_eq!(sizify(1500, false), "1.47K");
    assert_eq!(sizify(1500000, false), "1.44M");
    assert_eq!(sizify(1500000000, false), "1.4G");
  }

  #[test]
  fn test_sizify_edge_cases() {
    assert_eq!(sizify(1, false), "1");
    assert_eq!(sizify(1023, false), "1023");
    assert_eq!(sizify(1025, false), "1.01K");
  }
}
false), "1023"); + assert_eq!(sizify(1025, false), "1.01K"); + } +} diff --git a/ferron/src/util/sni.rs b/ferron/src/util/sni.rs new file mode 100644 index 0000000000000000000000000000000000000000..b68fb771dd5e22da5ac0cc8cc9765117102dbd66 --- /dev/null +++ b/ferron/src/util/sni.rs @@ -0,0 +1,46 @@ +use crate::ferron_util::match_hostname::match_hostname; +use rustls::{server::ResolvesServerCert, sign::CertifiedKey}; +use std::{collections::HashMap, sync::Arc}; + +#[derive(Debug)] +pub struct CustomSniResolver { + fallback_cert_key: Option>, + cert_keys: HashMap>, +} + +impl CustomSniResolver { + pub fn new() -> Self { + Self { + fallback_cert_key: None, + cert_keys: HashMap::new(), + } + } + + pub fn load_fallback_cert_key(&mut self, fallback_cert_key: Arc) { + self.fallback_cert_key = Some(fallback_cert_key); + } + + pub fn load_host_cert_key(&mut self, host: &str, cert_key: Arc) { + self.cert_keys.insert(String::from(host), cert_key); + } +} + +impl ResolvesServerCert for CustomSniResolver { + fn resolve( + &self, + client_hello: rustls::server::ClientHello<'_>, + ) -> Option> { + let hostname = client_hello.server_name(); + if let Some(hostname) = hostname { + let keys_iterator = self.cert_keys.keys(); + for configured_hostname in keys_iterator { + if match_hostname(Some(configured_hostname), Some(hostname)) { + return self.cert_keys.get(configured_hostname).cloned(); + } + } + self.fallback_cert_key.clone() + } else { + self.fallback_cert_key.clone() + } + } +} diff --git a/ferron/src/util/split_stream_by_map.rs b/ferron/src/util/split_stream_by_map.rs new file mode 100644 index 0000000000000000000000000000000000000000..79806226334ebd117a3a757ed49ce1751cd8beaa --- /dev/null +++ b/ferron/src/util/split_stream_by_map.rs @@ -0,0 +1,348 @@ +// Copyright (c) Andrew Burkett +// Portions of this file are derived from `split-stream-by` (https://github.com/drewkett/split-stream-by). 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +use std::marker::PhantomData; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Poll, Waker}; + +pub use futures_util::future::Either; +use futures_util::stream::Stream; +use pin_project_lite::pin_project; +use tokio::sync::Mutex; + +pin_project! { +struct SplitByMap { + buf_left: Option, + buf_right: Option, + waker_left: Option, + waker_right: Option, + #[pin] + stream: S, + predicate: P, + item: PhantomData, +} +} + +impl SplitByMap +where + S: Stream, + P: Fn(I) -> Either, +{ + fn new(stream: S, predicate: P) -> Arc> { + Arc::new(Mutex::new(Self { + buf_right: None, + buf_left: None, + waker_right: None, + waker_left: None, + stream, + predicate, + item: PhantomData, + })) + } + + fn poll_next_left( + self: std::pin::Pin<&mut Self>, + cx: &mut futures_util::task::Context<'_>, + ) -> std::task::Poll> { + let this = self.project(); + // Assign the waker multiple times, because if it was only once, the waking might fail + *this.waker_left = Some(cx.waker().clone()); + if let Some(item) = this.buf_left.take() { + // There was already a value in the buffer. Return that value + return Poll::Ready(Some(item)); + } + if this.buf_right.is_some() { + // There is a value available for the other stream. 
Wake that stream if possible + // and return pending since we can't store multiple values for a stream + if let Some(waker) = this.waker_right { + waker.wake_by_ref(); + } + return Poll::Pending; + } + match this.stream.poll_next(cx) { + Poll::Ready(Some(item)) => { + match (this.predicate)(item) { + Either::Left(left_item) => Poll::Ready(Some(left_item)), + Either::Right(right_item) => { + // This value is not what we wanted. Store it and notify other partition + // task if it exists + let _ = this.buf_right.replace(right_item); + if let Some(waker) = this.waker_right { + waker.wake_by_ref(); + } + Poll::Pending + } + } + } + Poll::Ready(None) => { + // If the underlying stream is finished, the `right` stream also must be + // finished, so wake it in case nothing else polls it + if let Some(waker) = this.waker_right { + waker.wake_by_ref(); + } + Poll::Ready(None) + } + Poll::Pending => Poll::Pending, + } + } + + fn poll_next_right( + self: std::pin::Pin<&mut Self>, + cx: &mut futures_util::task::Context<'_>, + ) -> std::task::Poll> { + let this = self.project(); + // Assign the waker multiple times, because if it was only once, the waking might fail + *this.waker_right = Some(cx.waker().clone()); + if let Some(item) = this.buf_right.take() { + // There was already a value in the buffer. Return that value + return Poll::Ready(Some(item)); + } + if this.buf_left.is_some() { + // There is a value available for the other stream. Wake that stream if possible + // and return pending since we can't store multiple values for a stream + if let Some(waker) = this.waker_left { + waker.wake_by_ref(); + } + return Poll::Pending; + } + match this.stream.poll_next(cx) { + Poll::Ready(Some(item)) => { + match (this.predicate)(item) { + Either::Left(left_item) => { + // This value is not what we wanted. 
Store it and notify other partition + // task if it exists + let _ = this.buf_left.replace(left_item); + if let Some(waker) = this.waker_left { + waker.wake_by_ref(); + } + Poll::Pending + } + Either::Right(right_item) => Poll::Ready(Some(right_item)), + } + } + Poll::Ready(None) => { + // If the underlying stream is finished, the `left` stream also must be + // finished, so wake it in case nothing else polls it + if let Some(waker) = this.waker_left { + waker.wake_by_ref(); + } + Poll::Ready(None) + } + Poll::Pending => Poll::Pending, + } + } +} + +/// A struct that implements `Stream` which returns the inner values where +/// the predicate returns `Either::Left(..)` when using `split_by_map` +#[allow(clippy::type_complexity)] +pub struct LeftSplitByMap { + stream: Arc>>, +} + +impl LeftSplitByMap { + #[allow(clippy::type_complexity)] + fn new(stream: Arc>>) -> Self { + Self { stream } + } +} + +impl Stream for LeftSplitByMap +where + S: Stream + Unpin, + P: Fn(I) -> Either, +{ + type Item = L; + fn poll_next( + self: std::pin::Pin<&mut Self>, + cx: &mut futures_util::task::Context<'_>, + ) -> std::task::Poll> { + let response = if let Ok(mut guard) = self.stream.try_lock() { + SplitByMap::poll_next_left(Pin::new(&mut guard), cx) + } else { + cx.waker().wake_by_ref(); + Poll::Pending + }; + response + } +} + +/// A struct that implements `Stream` which returns the inner values where +/// the predicate returns `Either::Right(..)` when using `split_by_map` +#[allow(clippy::type_complexity)] +pub struct RightSplitByMap { + stream: Arc>>, +} + +impl RightSplitByMap { + #[allow(clippy::type_complexity)] + fn new(stream: Arc>>) -> Self { + Self { stream } + } +} + +impl Stream for RightSplitByMap +where + S: Stream + Unpin, + P: Fn(I) -> Either, +{ + type Item = R; + fn poll_next( + self: std::pin::Pin<&mut Self>, + cx: &mut futures_util::task::Context<'_>, + ) -> std::task::Poll> { + let response = if let Ok(mut guard) = self.stream.try_lock() { + 
SplitByMap::poll_next_right(Pin::new(&mut guard), cx) + } else { + cx.waker().wake_by_ref(); + Poll::Pending + }; + response + } +} + +/// This extension trait provides the functionality for splitting a +/// stream by a predicate of type `Fn(Self::Item) -> Either`. The resulting +/// streams will yield types `L` and `R` respectively +pub trait SplitStreamByMapExt: Stream { + /// This takes ownership of a stream and returns two streams based on a + /// predicate. The predicate takes an item by value and returns + /// `Either::Left(..)` or `Either::Right(..)` where the inner + /// values of `Left` and `Right` become the items of the two respective + /// streams + /// + /// ``` + /// use split_stream_by::{Either, SplitStreamByMapExt}; + /// struct Request { + /// //... + /// } + /// struct Response { + /// //... + /// } + /// enum Message { + /// Request(Request), + /// Response(Response) + /// } + /// let incoming_stream = futures::stream::iter([ + /// Message::Request(Request {}), + /// Message::Response(Response {}), + /// Message::Response(Response {}), + /// ]); + /// let (mut request_stream, mut response_stream) = incoming_stream.split_by_map(|item| match item { + /// Message::Request(req) => Either::Left(req), + /// Message::Response(res) => Either::Right(res), + /// }); + /// ``` + #[allow(clippy::type_complexity)] + fn split_by_map( + self, + predicate: P, + ) -> ( + LeftSplitByMap, + RightSplitByMap, + ) + where + P: Fn(Self::Item) -> Either, + Self: Sized, + { + let stream = SplitByMap::new(self, predicate); + let true_stream = LeftSplitByMap::new(stream.clone()); + let false_stream = RightSplitByMap::new(stream); + (true_stream, false_stream) + } +} + +impl SplitStreamByMapExt for T where T: Stream + ?Sized {} + +#[cfg(test)] +mod tests { + use super::*; + use futures_util::{stream, StreamExt}; + + #[tokio::test] + async fn test_split_by_map_basic() { + let input_stream = stream::iter(vec![1, 2, 3, 4, 5, 6]); + let (evens, odds) = 
input_stream.split_by_map(|x| { + if x % 2 == 0 { + Either::Left(x) + } else { + Either::Right(x) + } + }); + + tokio::spawn(async move { + let evens_collected: Vec = evens.collect().await; + assert_eq!(evens_collected, vec![2, 4, 6]); + }); + + tokio::spawn(async move { + let odds_collected: Vec = odds.collect().await; + assert_eq!(odds_collected, vec![1, 3, 5]); + }); + } + + #[tokio::test] + async fn test_split_by_map_empty_stream() { + let input_stream = stream::iter(Vec::::new()); + let (left, right) = input_stream.split_by_map(|x| { + if x % 2 == 0 { + Either::Left(x) + } else { + Either::Right(x) + } + }); + + tokio::spawn(async move { + let left_collected: Vec = left.collect().await; + assert!(left_collected.is_empty()); + }); + + tokio::spawn(async move { + let right_collected: Vec = right.collect().await; + assert!(right_collected.is_empty()); + }); + } + + #[tokio::test] + async fn test_split_by_map_all_left() { + let input_stream = stream::iter(vec![2, 4, 6, 8]); + let (left, right) = input_stream.split_by_map(Either::::Left); + + tokio::spawn(async move { + let left_collected: Vec = left.collect().await; + assert_eq!(left_collected, vec![2, 4, 6, 8]); + }); + + tokio::spawn(async move { + let right_collected: Vec = right.collect().await; + assert!(right_collected.is_empty()); + }); + } + + #[tokio::test] + async fn test_split_by_map_all_right() { + let input_stream = stream::iter(vec![1, 3, 5, 7]); + let (left, right) = input_stream.split_by_map(Either::::Right); + + tokio::spawn(async move { + let left_collected: Vec = left.collect().await; + assert!(left_collected.is_empty()); + }); + + tokio::spawn(async move { + let right_collected: Vec = right.collect().await; + assert_eq!(right_collected, vec![1, 3, 5, 7]); + }); + } +} diff --git a/ferron/src/util/ttl_cache.rs b/ferron/src/util/ttl_cache.rs new file mode 100644 index 0000000000000000000000000000000000000000..39911114f0dbb840f56be00c4a7d8fbe92f3dc66 --- /dev/null +++ 
b/ferron/src/util/ttl_cache.rs @@ -0,0 +1,111 @@ +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +pub struct TtlCache { + cache: HashMap, + ttl: Duration, +} + +impl TtlCache +where + K: std::cmp::Eq + std::hash::Hash + Clone, + V: Clone, +{ + pub fn new(ttl: Duration) -> Self { + Self { + cache: HashMap::new(), + ttl, + } + } + + pub fn insert(&mut self, key: K, value: V) { + self.cache.insert(key, (value, Instant::now())); + } + + pub fn get(&self, key: &K) -> Option { + self.cache.get(key).and_then(|(value, timestamp)| { + if timestamp.elapsed() < self.ttl { + Some(value.clone()) + } else { + None + } + }) + } + + #[allow(dead_code)] + pub fn remove(&mut self, key: &K) -> Option { + self.cache.remove(key).map(|(value, _)| value) + } + + pub fn cleanup(&mut self) { + self + .cache + .retain(|_, (_, timestamp)| timestamp.elapsed() < self.ttl); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::thread::sleep; + use std::time::Duration; + + #[test] + fn test_insert_and_get() { + let mut cache = TtlCache::new(Duration::new(5, 0)); + cache.insert("key1", "value1"); + + assert_eq!(cache.get(&"key1"), Some("value1")); + } + + #[test] + fn test_get_expired() { + let mut cache = TtlCache::new(Duration::new(1, 0)); + cache.insert("key1", "value1"); + + // Sleep for 2 seconds to ensure the entry expires + sleep(Duration::new(2, 0)); + + assert_eq!(cache.get(&"key1"), None); + } + + #[test] + fn test_remove() { + let mut cache = TtlCache::new(Duration::new(5, 0)); + cache.insert("key1", "value1"); + cache.remove(&"key1"); + + assert_eq!(cache.get(&"key1"), None); + } + + #[test] + fn test_cleanup() { + let mut cache = TtlCache::new(Duration::new(1, 0)); + cache.insert("key1", "value1"); + cache.insert("key2", "value2"); + + // Sleep for 2 seconds to ensure the entries expire + sleep(Duration::new(2, 0)); + + cache.cleanup(); + + assert_eq!(cache.get(&"key1"), None); + assert_eq!(cache.get(&"key2"), None); + } + + #[test] + fn 
test_get_non_existent() { + let cache: TtlCache<&str, &str> = TtlCache::new(Duration::new(5, 0)); + assert_eq!(cache.get(&"key1"), None); + } + + #[test] + fn test_insert_and_get_multiple() { + let mut cache = TtlCache::new(Duration::new(5, 0)); + cache.insert("key1", "value1"); + cache.insert("key2", "value2"); + + assert_eq!(cache.get(&"key1"), Some("value1")); + assert_eq!(cache.get(&"key2"), Some("value2")); + } +} diff --git a/ferron/src/util/url_rewrite_structs.rs b/ferron/src/util/url_rewrite_structs.rs new file mode 100644 index 0000000000000000000000000000000000000000..3e4538686bf9777632f2f8d950c572c3b35b5ca9 --- /dev/null +++ b/ferron/src/util/url_rewrite_structs.rs @@ -0,0 +1,64 @@ +use fancy_regex::Regex; + +pub struct UrlRewriteMapEntry { + pub regex: Regex, + pub replacement: String, + pub is_not_directory: bool, + pub is_not_file: bool, + pub last: bool, + pub allow_double_slashes: bool, +} + +impl UrlRewriteMapEntry { + pub fn new( + regex: Regex, + replacement: String, + is_not_directory: bool, + is_not_file: bool, + last: bool, + allow_double_slashes: bool, + ) -> Self { + Self { + regex, + replacement, + is_not_directory, + is_not_file, + last, + allow_double_slashes, + } + } +} + +pub struct UrlRewriteMapWrap { + pub domain: Option, + pub ip: Option, + pub rewrite_map: Vec, + pub locations: Vec, +} + +impl UrlRewriteMapWrap { + pub fn new( + domain: Option, + ip: Option, + rewrite_map: Vec, + locations: Vec, + ) -> Self { + Self { + domain, + ip, + rewrite_map, + locations, + } + } +} + +pub struct UrlRewriteMapLocationWrap { + pub path: String, + pub rewrite_map: Vec, +} + +impl UrlRewriteMapLocationWrap { + pub fn new(path: String, rewrite_map: Vec) -> Self { + Self { path, rewrite_map } + } +} diff --git a/ferron/src/util/url_sanitizer.rs b/ferron/src/util/url_sanitizer.rs new file mode 100644 index 0000000000000000000000000000000000000000..1a9eb088ea0a2ad4b42c129d6dbf32d7ed96818b --- /dev/null +++ b/ferron/src/util/url_sanitizer.rs @@ -0,0 
// Copyright (c) 2018-2025 SVR.JS
// Portions of this file are derived from SVR.JS (https://git.svrjs.org/svrjs/svrjs).
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
use anyhow::{anyhow, Result};
use std::str;

/// Sanitizes a request path: strips NUL bytes, validates percent-encoding,
/// decodes safe percent-encoded characters while re-encoding unsafe ones,
/// converts backslashes to slashes, collapses duplicate slashes (unless
/// `allow_double_slashes` is set), and resolves `.`/`..` path segments.
///
/// The inputs `"*"` and `""` are returned unchanged. Returns an error when
/// the percent-encoding is malformed (truncated escape, non-hex digits, or a
/// byte that can never appear in valid UTF-8).
pub fn sanitize_url(resource: &str, allow_double_slashes: bool) -> Result<String> {
  if resource == "*" || resource.is_empty() {
    return Ok(resource.to_string());
  }

  let mut sanitized = String::with_capacity(resource.len());

  // Remove null bytes and handle initial sanitization
  // NOTE(review): this walks the input byte-wise and pushes each byte as a
  // char, so multi-byte UTF-8 sequences are handled as individual bytes from
  // here on — confirm that callers only pass ASCII-or-percent-encoded paths.
  for &ch in resource.as_bytes() {
    if ch != b'\0' {
      sanitized.push(ch as char);
    }
  }

  // Check for malformed URL encoding (invalid percent encoding)
  // Rejects: a '%' without two following characters, non-hex escape digits,
  // and bytes 0xC0/0xC1/0xFE/0xFF, which can never occur in valid UTF-8.
  let bytes = sanitized.as_bytes();
  let mut i = 0;
  while i < bytes.len() {
    if bytes[i] == b'%' {
      if i + 2 >= bytes.len() {
        return Err(anyhow!("URI malformed"));
      }
      let hex = &bytes[i + 1..i + 3];
      if !hex[0].is_ascii_hexdigit() || !hex[1].is_ascii_hexdigit() {
        return Err(anyhow!("URI malformed"));
      }
      let value = u8::from_str_radix(str::from_utf8(hex)?, 16)?;
      if value == 0xc0 || value == 0xc1 || value >= 0xfe {
        return Err(anyhow!("URI malformed"));
      }
    }
    i += 1;
  }

  // Decode percent-encoded characters while preserving safe ones
  // Only escapes whose decoded byte is in the URL-safe set below are decoded;
  // all other escapes are kept percent-encoded, and "%00" is dropped entirely
  // (it decodes to NUL, which was already stripped above).
  let mut decoded = String::with_capacity(sanitized.len());
  let bytes = sanitized.as_bytes();
  let mut i = 0;
  while i < bytes.len() {
    if bytes[i] == b'%' && i + 2 < bytes.len() {
      let hex = &bytes[i + 1..i + 3];
      if let Ok(value) = u8::from_str_radix(str::from_utf8(hex)?, 16) {
        if value != 0 {
          let decoded_char = value as char;
          if decoded_char.is_ascii_alphanumeric()
            || "!$&'()*+,-./0123456789:;=@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]_abcdefghijklmnopqrstuvwxyz~"
              .contains(decoded_char)
          {
            decoded.push(decoded_char);
          } else {
            // Unsafe byte: keep the original escape verbatim.
            decoded.push('%');
            decoded.push(hex[0] as char);
            decoded.push(hex[1] as char);
          }
          i += 2;
        } else {
          // "%00": skip the whole escape without emitting anything.
          i += 3;
          continue;
        }
      } else {
        decoded.push('%');
      }
    } else {
      decoded.push(bytes[i] as char);
    }
    i += 1;
  }

  // Encode unsafe characters
  let mut encoded = String::with_capacity(decoded.len());
  for ch in decoded.chars() {
    match ch {
      '<' | '>' | '^' | '`' | '{' | '|' | '}' => {
        encoded.push_str(&format!("%{:02X}", ch as u8));
      }
      _ => encoded.push(ch),
    }
  }

  // Ensure the resource starts with a slash
  if !encoded.starts_with('/') {
    encoded.insert(0, '/');
  }

  // Convert backslashes to slashes and handle duplicate slashes
  let mut final_resource = String::with_capacity(encoded.len());
  let mut last_was_slash = false;
  for ch in encoded.chars() {
    if ch == '\\' {
      final_resource.push('/');
      last_was_slash = true;
    } else if ch == '/' {
      // Runs of slashes collapse to one unless double slashes are allowed.
      if !allow_double_slashes && last_was_slash {
        continue;
      }
      final_resource.push('/');
      last_was_slash = true;
    } else {
      final_resource.push(ch);
      last_was_slash = false;
    }
  }

  // Normalize path segments (remove ".", "..", trailing dots)
  let mut segments: Vec<&str> = Vec::new();
  for mut part in final_resource.split('/') {
    match part {
      "." => continue,
      ".." => {
        segments.pop(); // Go up one directory
      }
      "" => {
        // Empty segments only survive when double slashes are allowed.
        if allow_double_slashes {
          segments.push("");
        }
      }
      _ => {
        // Strip trailing dots from the segment (e.g. "foo.." -> "foo").
        while part.ends_with('.') {
          part = &part[..part.len() - 1];
        }
        if !part.is_empty() {
          segments.push(part);
        }
      }
    }
  }

  final_resource = if allow_double_slashes {
    segments.join("/")
  } else if !segments.is_empty() && final_resource.ends_with('/') {
    // Preserve the trailing slash of directory-style paths.
    format!("/{}/", segments.join("/"))
  } else {
    format!("/{}", segments.join("/"))
  };

  // Remove any remaining "/../" sequences
  while final_resource.contains("/../") {
    final_resource = final_resource.replacen("/../", "", 1);
  }

  // Ensure result is not empty
  if final_resource.is_empty() {
    final_resource.push('/');
  }

  Ok(final_resource)
}

// Path sanitizer tests taken from SVR.JS web server
#[cfg(test)]
mod tests {
  use super::*;
  use anyhow::Result;

  #[test]
  fn should_return_asterisk_for_asterisk() -> Result<()> {
    assert_eq!(sanitize_url("*", false)?, "*");
    Ok(())
  }

  #[test]
  fn should_return_empty_string_for_empty_string() -> Result<()> {
    assert_eq!(sanitize_url("", false)?, "");
    Ok(())
  }

  #[test]
  fn should_remove_null_characters() -> Result<()> {
    assert_eq!(sanitize_url("/test%00", false)?, "/test");
    assert_eq!(sanitize_url("/test\0", false)?, "/test");
    Ok(())
  }

  #[test]
  fn should_throw_uri_error_for_malformed_url() {
    assert!(sanitize_url("%c0%af", false).is_err());
    assert!(sanitize_url("%u002f", false).is_err());
    assert!(sanitize_url("%as", false).is_err());
  }

  #[test]
  fn should_ensure_the_resource_starts_with_a_slash() -> Result<()> {
    assert_eq!(sanitize_url("test", false)?, "/test");
    Ok(())
  }

  #[test]
  fn should_convert_backslashes_to_slashes() -> Result<()> {
    assert_eq!(sanitize_url("test\\path", false)?, "/test/path");
    Ok(())
  }

  #[test]
  fn should_handle_duplicate_slashes() -> Result<()> {
    assert_eq!(sanitize_url("test//path", false)?, "/test/path");
assert_eq!(sanitize_url("test//path", true)?, "/test//path"); + Ok(()) + } + + #[test] + fn should_handle_relative_navigation() -> Result<()> { + assert_eq!(sanitize_url("/./test", false)?, "/test"); + assert_eq!(sanitize_url("/../test", false)?, "/test"); + assert_eq!(sanitize_url("../test", false)?, "/test"); + assert_eq!(sanitize_url("./test", false)?, "/test"); + assert_eq!(sanitize_url("/test/./", false)?, "/test/"); + assert_eq!(sanitize_url("/test/../", false)?, "/"); + assert_eq!(sanitize_url("/test/../path", false)?, "/path"); + Ok(()) + } + + #[test] + fn should_remove_trailing_dots_in_paths() -> Result<()> { + assert_eq!(sanitize_url("/test...", false)?, "/test"); + assert_eq!(sanitize_url("/test.../", false)?, "/test/"); + Ok(()) + } + + #[test] + fn should_return_slash_for_empty_sanitized_resource() -> Result<()> { + assert_eq!(sanitize_url("/../..", false)?, "/"); + Ok(()) + } + + #[test] + fn should_encode_special_characters() -> Result<()> { + assert_eq!(sanitize_url("/test", false)?, "/test%3Cpath%3E"); + assert_eq!(sanitize_url("/test^path", false)?, "/test%5Epath"); + assert_eq!(sanitize_url("/test`path", false)?, "/test%60path"); + assert_eq!(sanitize_url("/test{path}", false)?, "/test%7Bpath%7D"); + assert_eq!(sanitize_url("/test|path", false)?, "/test%7Cpath"); + Ok(()) + } + + #[test] + fn should_preserve_certain_characters() -> Result<()> { + assert_eq!(sanitize_url("/test!path", false)?, "/test!path"); + assert_eq!(sanitize_url("/test$path", false)?, "/test$path"); + assert_eq!(sanitize_url("/test&path", false)?, "/test&path"); + assert_eq!(sanitize_url("/test-path", false)?, "/test-path"); + assert_eq!(sanitize_url("/test=path", false)?, "/test=path"); + assert_eq!(sanitize_url("/test@path", false)?, "/test@path"); + assert_eq!(sanitize_url("/test_path", false)?, "/test_path"); + assert_eq!(sanitize_url("/test~path", false)?, "/test~path"); + Ok(()) + } + + #[test] + fn 
should_decode_url_encoded_characters_while_preserving_certain_characters() -> Result<()> { + assert_eq!(sanitize_url("/test%20path", false)?, "/test%20path"); + assert_eq!(sanitize_url("/test%21path", false)?, "/test!path"); + assert_eq!(sanitize_url("/test%22path", false)?, "/test%22path"); + assert_eq!(sanitize_url("/test%24path", false)?, "/test$path"); + assert_eq!(sanitize_url("/test%25path", false)?, "/test%25path"); + assert_eq!(sanitize_url("/test%26path", false)?, "/test&path"); + assert_eq!(sanitize_url("/test%2Dpath", false)?, "/test-path"); + assert_eq!(sanitize_url("/test%3Cpath", false)?, "/test%3Cpath"); + assert_eq!(sanitize_url("/test%3Dpath", false)?, "/test=path"); + assert_eq!(sanitize_url("/test%3Epath", false)?, "/test%3Epath"); + assert_eq!(sanitize_url("/test%40path", false)?, "/test@path"); + assert_eq!(sanitize_url("/test%5Fpath", false)?, "/test_path"); + assert_eq!(sanitize_url("/test%7Dpath", false)?, "/test%7Dpath"); + assert_eq!(sanitize_url("/test%7Epath", false)?, "/test~path"); + Ok(()) + } + + #[test] + fn should_decode_url_encoded_alphanumeric_characters_while_preserving_certain_characters( + ) -> Result<()> { + assert_eq!(sanitize_url("/conf%69g.json", false)?, "/config.json"); + assert_eq!(sanitize_url("/CONF%49G.JSON", false)?, "/CONFIG.JSON"); + assert_eq!(sanitize_url("/svr%32.js", false)?, "/svr2.js"); + assert_eq!(sanitize_url("/%73%76%72%32%2E%6A%73", false)?, "/svr2.js"); + Ok(()) + } + + #[test] + fn should_decode_url_encoded_characters_regardless_of_the_letter_case_of_the_url_encoding( + ) -> Result<()> { + assert_eq!(sanitize_url("/%5f", false)?, "/_"); + assert_eq!(sanitize_url("/%5F", false)?, "/_"); + Ok(()) + } +} diff --git a/ferron/src/util/validate_config.rs b/ferron/src/util/validate_config.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d91f9b8a88b5ba9d25643ab604e2a918ab8cfa9 --- /dev/null +++ b/ferron/src/util/validate_config.rs @@ -0,0 +1,1155 @@ +use 
crate::ferron_common::ServerConfig; +use hyper::header::{HeaderName, HeaderValue}; +use std::collections::HashSet; +use std::error::Error; +use std::net::IpAddr; +use std::str::FromStr; +use yaml_rust2::{yaml, Yaml}; + +// Struct to store used configuration properties +struct UsedProperties<'a> { + config: &'a ServerConfig, + properties: HashSet, +} + +impl<'a> UsedProperties<'a> { + fn new(config: &'a ServerConfig) -> Self { + UsedProperties { + config, + properties: HashSet::new(), + } + } + + fn contains(&mut self, property: &str) -> bool { + self.properties.insert(property.to_string()); + !self.config[property].is_badvalue() + } + + fn unused(&self) -> Vec { + let empty_hashmap = yaml::Hash::new(); + let all_properties = self + .config + .as_hash() + .unwrap_or(&empty_hashmap) + .keys() + .filter_map(|a| a.as_str().map(|a| a.to_string())); + all_properties + .filter(|item| !self.properties.contains(item)) + .collect() + } +} + +fn validate_ip(ip: &str) -> bool { + let _: IpAddr = match ip.parse() { + Ok(addr) => addr, + Err(_) => return false, + }; + true +} + +// Internal configuration file validators +pub fn validate_config( + config: ServerConfig, + is_global: bool, + is_location: bool, + modules_optional_builtin: &[String], +) -> Result, Box> { + let mut used_properties = UsedProperties::new(&config); + + let domain_badvalue = !used_properties.contains("domain"); + let ip_badvalue = !used_properties.contains("ip"); + + if !domain_badvalue && config["domain"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid domain name"))? 
+ } + + if !ip_badvalue { + match config["ip"].as_str() { + Some(ip) => { + if !validate_ip(ip) { + Err(anyhow::anyhow!("Invalid IP address"))?; + } + } + None => { + Err(anyhow::anyhow!("Invalid IP address"))?; + } + } + } + + if domain_badvalue && ip_badvalue && !is_global && !is_location { + Err(anyhow::anyhow!( + "A host must either have IP address or domain name specified" + ))?; + } + + if used_properties.contains("path") { + if !is_location { + Err(anyhow::anyhow!( + "Location path configuration is only allowed in location configuration" + ))?; + } + if config["path"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid location path"))?; + } + } + + if used_properties.contains("locations") && is_location { + Err(anyhow::anyhow!("Nested locations are not allowed"))?; + } + + if used_properties.contains("loadModules") { + if !is_global { + Err(anyhow::anyhow!( + "Module configuration is not allowed in host configuration" + ))? + } + if let Some(modules) = config["loadModules"].as_vec() { + let modules_iter = modules.iter(); + for module_name_yaml in modules_iter { + if module_name_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid module name"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid module configuration"))? + } + } + + if used_properties.contains("port") { + if !is_global { + Err(anyhow::anyhow!( + "HTTP port configuration is not allowed in host configuration" + ))? + } + if let Some(port) = config["port"].as_i64() { + if !(0..=65535).contains(&port) { + Err(anyhow::anyhow!("Invalid HTTP port"))? + } + } else if config["port"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid HTTP port"))? + } + } + + if used_properties.contains("sport") { + if !is_global { + Err(anyhow::anyhow!( + "HTTPS port configuration is not allowed in host configuration" + ))? + } + if let Some(port) = config["sport"].as_i64() { + if !(0..=65535).contains(&port) { + Err(anyhow::anyhow!("Invalid HTTPS port"))? 
+ } + } else if config["sport"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid HTTPS port"))? + } + } + + if used_properties.contains("secure") { + if !is_global { + Err(anyhow::anyhow!( + "HTTPS enabling configuration is not allowed in host configuration" + ))? + } + if config["secure"].as_bool().is_none() { + Err(anyhow::anyhow!("Invalid HTTPS enabling option value"))? + } + } + + if used_properties.contains("enableHTTP2") { + if !is_global { + Err(anyhow::anyhow!( + "HTTP/2 enabling configuration is not allowed in host configuration" + ))? + } + if config["enableHTTP2"].as_bool().is_none() { + Err(anyhow::anyhow!("Invalid HTTP/2 enabling option value"))? + } + } + + if used_properties.contains("enableHTTP3") { + if !is_global { + Err(anyhow::anyhow!( + "HTTP/3 enabling configuration is not allowed in host configuration" + ))? + } + if config["enableHTTP3"].as_bool().is_none() { + Err(anyhow::anyhow!("Invalid HTTP/3 enabling option value"))? + } + } + + if used_properties.contains("logFilePath") { + if !is_global { + Err(anyhow::anyhow!( + "Log file configuration is not allowed in host configuration" + ))? + } + if config["logFilePath"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid log file path"))? + } + } + + if used_properties.contains("errorLogFilePath") { + if !is_global { + Err(anyhow::anyhow!( + "Error log file configuration is not allowed in host configuration" + ))? + } + if config["errorLogFilePath"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid error log file path"))? + } + } + + if used_properties.contains("cert") { + if !is_global { + Err(anyhow::anyhow!( + "TLS certificate configuration is not allowed in host configuration" + ))? + } + if config["cert"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid TLS certificate path"))? + } + } + + if used_properties.contains("key") { + if !is_global { + Err(anyhow::anyhow!( + "Private key configuration is not allowed in host configuration" + ))? 
+ } + if config["key"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid private key path"))? + } + } + + if used_properties.contains("sni") { + if !is_global { + Err(anyhow::anyhow!( + "SNI configuration is not allowed in host configuration" + ))? + } + if let Some(sni) = config["sni"].as_hash() { + let sni_hostnames = sni.keys(); + for sni_hostname_unknown in sni_hostnames { + if let Some(sni_hostname) = sni_hostname_unknown.as_str() { + if sni[sni_hostname_unknown]["cert"].as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid SNI TLS certificate path for \"{}\"", + sni_hostname + ))? + } + if sni[sni_hostname_unknown]["key"].as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid SNI private key certificate path for \"{}\"", + sni_hostname + ))? + } + } else { + Err(anyhow::anyhow!("Invalid SNI hostname"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid SNI certificate list"))? + } + } + + if used_properties.contains("http2Options") { + if !is_global { + Err(anyhow::anyhow!( + "HTTP/2 configuration is not allowed in host configuration" + ))? + } + if config["http2Options"].as_hash().is_some() { + if let Some(initial_window_size) = config["http2Options"]["initialWindowSize"].as_i64() { + if !(0..=2_147_483_647).contains(&initial_window_size) { + Err(anyhow::anyhow!("Invalid HTTP/2 initial window size"))? + } + } + + if let Some(max_frame_size) = config["http2Options"]["maxFrameSize"].as_i64() { + if !(16_384..=16_777_215).contains(&max_frame_size) { + Err(anyhow::anyhow!("Invalid HTTP/2 max frame size"))? + } + } + + if let Some(max_concurrent_streams) = config["http2Options"]["maxConcurrentStreams"].as_i64() + { + if max_concurrent_streams < 0 { + Err(anyhow::anyhow!("Invalid HTTP/2 max concurrent streams"))? + } + } + + if let Some(max_header_list_size) = config["http2Options"]["maxHeaderListSize"].as_i64() { + if max_header_list_size < 0 { + Err(anyhow::anyhow!("Invalid HTTP/2 max header list size"))? 
+ } + } + + if !config["http2Options"]["enableConnectProtocol"].is_badvalue() + && config["http2Options"]["enableConnectProtocol"] + .as_bool() + .is_none() + { + Err(anyhow::anyhow!( + "Invalid HTTP/2 enable connect protocol option" + ))? + } + } else { + Err(anyhow::anyhow!("Invalid HTTP/2 options"))? + } + } + + if used_properties.contains("useClientCertificate") { + if !is_global { + Err(anyhow::anyhow!( + "Client certificate verfication enabling option is not allowed in host configuration" + ))? + } + if config["useClientCertificate"].as_bool().is_none() { + Err(anyhow::anyhow!( + "Invalid client certificate verification enabling option value" + ))? + } + } + + if used_properties.contains("cipherSuite") { + if !is_global { + Err(anyhow::anyhow!( + "Cipher suite configuration is not allowed in host configuration" + ))? + } + if let Some(cipher_suites) = config["cipherSuite"].as_vec() { + let cipher_suites_iter = cipher_suites.iter(); + for cipher_suite_name_yaml in cipher_suites_iter { + if cipher_suite_name_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid cipher suite"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid cipher suite configuration"))? + } + } + + if used_properties.contains("ecdhCurve") { + if !is_global { + Err(anyhow::anyhow!( + "ECDH curve configuration is not allowed in host configuration" + ))? + } + if let Some(ecdh_curves) = config["ecdhCurve"].as_vec() { + let ecdh_curves_iter = ecdh_curves.iter(); + for ecdh_curve_name_yaml in ecdh_curves_iter { + if ecdh_curve_name_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid ECDH curve"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid ECDH curve configuration"))? + } + } + + if used_properties.contains("tlsMinVersion") { + if !is_global { + Err(anyhow::anyhow!( + "Minimum TLS version is not allowed in host configuration" + ))? + } + if config["tlsMinVersion"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid minimum TLS version"))? 
+ } + } + + if used_properties.contains("tlsMaxVersion") { + if !is_global { + Err(anyhow::anyhow!( + "Maximum TLS version is not allowed in host configuration" + ))? + } + if config["tlsMaxVersion"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid maximum TLS version"))? + } + } + + if used_properties.contains("enableOCSPStapling") { + if !is_global { + Err(anyhow::anyhow!( + "OCSP stapling enabling option is not allowed in host configuration" + ))? + } + if config["enableOCSPStapling"].as_bool().is_none() { + Err(anyhow::anyhow!( + "Invalid OCSP stapling enabling option value" + ))? + } + } + + if used_properties.contains("serverAdministratorEmail") + && config["serverAdministratorEmail"].as_str().is_none() + { + Err(anyhow::anyhow!( + "Invalid server administrator email address" + ))? + } + + if used_properties.contains("enableIPSpoofing") && config["enableIPSpoofing"].as_bool().is_none() + { + Err(anyhow::anyhow!( + "Invalid X-Forwarded-For enabling option value" + ))? + } + + if used_properties.contains("disableNonEncryptedServer") { + if !is_global { + Err(anyhow::anyhow!( + "Non-encrypted server disabling option is not allowed in host configuration" + ))? + } + if config["disableNonEncryptedServer"].as_bool().is_none() { + Err(anyhow::anyhow!( + "Invalid non-encrypted server disabling option value" + ))? + } + } + + if used_properties.contains("blocklist") { + if !is_global { + Err(anyhow::anyhow!( + "Block list configuration is not allowed in host configuration" + ))? + } + if let Some(blocklist) = config["blocklist"].as_vec() { + let blocklist_iter = blocklist.iter(); + for blocklist_entry_yaml in blocklist_iter { + match blocklist_entry_yaml.as_str() { + Some(blocklist_entry) => { + if !validate_ip(blocklist_entry) { + Err(anyhow::anyhow!("Invalid block list entry"))? + } + } + None => Err(anyhow::anyhow!("Invalid block list entry"))?, + } + } + } else { + Err(anyhow::anyhow!("Invalid block list configuration"))? 
+ } + } + + if used_properties.contains("environmentVariables") { + if !is_global { + Err(anyhow::anyhow!( + "Environment variable configuration is not allowed in host configuration" + ))? + } + if let Some(environment_variables_hash) = config["environmentVariables"].as_hash() { + let environment_variables_hash_iter = environment_variables_hash.iter(); + for (var_name, var_value) in environment_variables_hash_iter { + if var_name.as_str().is_none() || var_value.as_str().is_none() { + Err(anyhow::anyhow!("Invalid environment variables"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid environment variables"))? + } + } + + if used_properties.contains("disableToHTTPSRedirect") + && config["disableToHTTPSRedirect"].as_bool().is_none() + { + Err(anyhow::anyhow!( + "Invalid HTTP to HTTPS redirect disabling option value" + ))? + } + + if used_properties.contains("wwwredirect") && config["wwwredirect"].as_bool().is_none() { + Err(anyhow::anyhow!( + "Invalid to \"www.\" URL redirect disabling option value" + ))? + } + + if used_properties.contains("customHeaders") { + if let Some(custom_headers_hash) = config["customHeaders"].as_hash() { + let custom_headers_hash_iter = custom_headers_hash.iter(); + for (header_name, header_value) in custom_headers_hash_iter { + if let Some(header_name) = header_name.as_str() { + if let Some(header_value) = header_value.as_str() { + if HeaderValue::from_str(header_value).is_err() + || HeaderName::from_str(header_name).is_err() + { + Err(anyhow::anyhow!("Invalid custom headers"))? + } + } else { + Err(anyhow::anyhow!("Invalid custom headers"))? + } + } else { + Err(anyhow::anyhow!("Invalid custom headers"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid custom headers"))? 
+ } + } + + if used_properties.contains("rewriteMap") { + if let Some(rewrite_map) = config["rewriteMap"].as_vec() { + let rewrite_map_iter = rewrite_map.iter(); + for rewrite_map_entry_yaml in rewrite_map_iter { + if !rewrite_map_entry_yaml.is_hash() { + Err(anyhow::anyhow!("Invalid URL rewrite map"))? + } + if rewrite_map_entry_yaml["regex"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid URL rewrite map"))? + } + if rewrite_map_entry_yaml["replacement"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid URL rewrite map"))? + } + if !rewrite_map_entry_yaml["isNotFile"].is_badvalue() + && rewrite_map_entry_yaml["isNotFile"].as_bool().is_none() + { + Err(anyhow::anyhow!("Invalid URL rewrite map"))? + } + if !rewrite_map_entry_yaml["isNotDirectory"].is_badvalue() + && rewrite_map_entry_yaml["isNotDirectory"].as_bool().is_none() + { + Err(anyhow::anyhow!("Invalid URL rewrite map"))? + } + if !rewrite_map_entry_yaml["allowDoubleSlashes"].is_badvalue() + && rewrite_map_entry_yaml["allowDoubleSlashes"] + .as_bool() + .is_none() + { + Err(anyhow::anyhow!("Invalid URL rewrite map"))? + } + if !rewrite_map_entry_yaml["last"].is_badvalue() + && rewrite_map_entry_yaml["last"].as_bool().is_none() + { + Err(anyhow::anyhow!("Invalid URL rewrite map"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid URL rewrite map"))? + } + } + + if used_properties.contains("enableRewriteLogging") + && config["enableRewriteLogging"].as_bool().is_none() + { + Err(anyhow::anyhow!( + "Invalid URL rewrite logging enabling option value" + ))? + } + + if used_properties.contains("disableTrailingSlashRedirects") + && config["disableTrailingSlashRedirects"].as_bool().is_none() + { + Err(anyhow::anyhow!( + "Invalid trailing slash redirect disabling option value" + ))? 
+ } + + if used_properties.contains("users") { + if let Some(users) = config["users"].as_vec() { + let users_iter = users.iter(); + for user_yaml in users_iter { + if !user_yaml.is_hash() { + Err(anyhow::anyhow!("Invalid user configuration"))? + } + if user_yaml["name"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid user configuration"))? + } + if user_yaml["pass"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid user configuration"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid user configuration"))? + } + } + + if used_properties.contains("nonStandardCodes") { + if let Some(non_standard_codes) = config["nonStandardCodes"].as_vec() { + let non_standard_codes_iter = non_standard_codes.iter(); + for non_standard_code_yaml in non_standard_codes_iter { + if !non_standard_code_yaml.is_hash() { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + if non_standard_code_yaml["scode"].as_i64().is_none() { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + if !non_standard_code_yaml["regex"].is_badvalue() + && non_standard_code_yaml["regex"].as_str().is_none() + { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + if !non_standard_code_yaml["url"].is_badvalue() + && non_standard_code_yaml["url"].as_str().is_none() + { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + if non_standard_code_yaml["regex"].is_badvalue() + && non_standard_code_yaml["url"].is_badvalue() + { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + if !non_standard_code_yaml["realm"].is_badvalue() + && non_standard_code_yaml["realm"].as_str().is_none() + { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? 
+ } + if !non_standard_code_yaml["disableBruteProtection"].is_badvalue() + && non_standard_code_yaml["disableBruteProtection"] + .as_bool() + .is_none() + { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + if !non_standard_code_yaml["userList"].is_badvalue() { + if let Some(users) = non_standard_code_yaml["userList"].as_vec() { + let users_iter = users.iter(); + for user_yaml in users_iter { + if user_yaml.as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + } + } else { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + } + if !non_standard_code_yaml["users"].is_badvalue() { + if let Some(users) = non_standard_code_yaml["users"].as_vec() { + let users_iter = users.iter(); + for user_yaml in users_iter { + match user_yaml.as_str() { + Some(user) => { + if !validate_ip(user) { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + } + None => Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))?, + } + } + } else { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + } + } + } else { + Err(anyhow::anyhow!( + "Invalid non-standard status code configuration" + ))? + } + } + + if used_properties.contains("errorPages") { + if let Some(error_pages) = config["errorPages"].as_vec() { + let error_pages_iter = error_pages.iter(); + for error_page_yaml in error_pages_iter { + if !error_page_yaml.is_hash() { + Err(anyhow::anyhow!("Invalid custom error page configuration"))? + } + if error_page_yaml["scode"].as_i64().is_none() { + Err(anyhow::anyhow!("Invalid custom error page configuration"))? + } + if error_page_yaml["path"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid custom error page configuration"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid custom error page configuration"))? 
+ } + } + + if used_properties.contains("wwwroot") && config["wwwroot"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid webroot"))? + } + + if used_properties.contains("enableETag") && config["enableETag"].as_bool().is_none() { + Err(anyhow::anyhow!("Invalid ETag enabling option"))? + } + + if used_properties.contains("enableCompression") + && config["enableCompression"].as_bool().is_none() + { + Err(anyhow::anyhow!("Invalid HTTP compression enabling option"))? + } + + if used_properties.contains("enableDirectoryListing") + && config["enableDirectoryListing"].as_bool().is_none() + { + Err(anyhow::anyhow!("Invalid directory listing enabling option"))? + } + + if used_properties.contains("enableAutomaticTLS") { + if !is_global { + Err(anyhow::anyhow!( + "Automatic TLS enabling configuration is not allowed in host configuration" + ))? + } + if config["enableAutomaticTLS"].as_bool().is_none() { + Err(anyhow::anyhow!( + "Invalid automatic TLS enabling option value" + ))? + } + } + + if used_properties.contains("useAutomaticTLSHTTPChallenge") { + if !is_global { + Err(anyhow::anyhow!( + "Automatic TLS HTTP challenge enabling configuration is not allowed in host configuration" + ))? + } + if config["useAutomaticTLSHTTPChallenge"].as_bool().is_none() { + Err(anyhow::anyhow!( + "Invalid automatic TLS HTTP challenge enabling option value" + ))? + } + } + + if used_properties.contains("automaticTLSContactEmail") { + if !is_global { + Err(anyhow::anyhow!( + "Automatic TLS contact email address configuration is not allowed in host configuration" + ))? + } + if config["automaticTLSContactEmail"].as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid automatic TLS contact email address" + ))? + } + } + + if used_properties.contains("automaticTLSContactCacheDirectory") { + if !is_global { + Err(anyhow::anyhow!( + "Automatic TLS cache directory configuration is not allowed in host configuration" + ))? 
+ } + if config["automaticTLSContactCacheDirectory"] + .as_str() + .is_none() + { + Err(anyhow::anyhow!( + "Invalid automatic TLS cache directory path" + ))? + } + } + + if used_properties.contains("automaticTLSLetsEncryptProduction") { + if !is_global { + Err(anyhow::anyhow!( + "Let's Encrypt production endpoint for automatic TLS enabling configuration is not allowed in host configuration" + ))? + } + if config["automaticTLSLetsEncryptProduction"] + .as_bool() + .is_none() + { + Err(anyhow::anyhow!( + "Invalid Let's Encrypt production endpoint for automatic TLS enabling option value" + ))? + } + } + + if used_properties.contains("timeout") { + if !is_global { + Err(anyhow::anyhow!( + "Server timeout configuration is not allowed in host configuration" + ))? + } + if !config["timeout"].is_null() { + if let Some(maximum_cache_response_size) = config["timeout"].as_i64() { + if maximum_cache_response_size < 0 { + Err(anyhow::anyhow!("Invalid server timeout"))? + } + } else { + Err(anyhow::anyhow!("Invalid server timeout"))? + } + } + } + + for module_optional_builtin in modules_optional_builtin.iter() { + match module_optional_builtin as &str { + #[cfg(feature = "rproxy")] + "rproxy" => { + if used_properties.contains("proxyTo") { + if let Some(proxy_urls) = config["proxyTo"].as_vec() { + let proxy_urls_iter = proxy_urls.iter(); + for proxy_url_yaml in proxy_urls_iter { + if proxy_url_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid reverse proxy target URL value"))? + } + } + } else if config["proxyTo"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid reverse proxy target URL value"))? + } + } + + if used_properties.contains("secureProxyTo") { + if let Some(proxy_urls) = config["secureProxyTo"].as_vec() { + let proxy_urls_iter = proxy_urls.iter(); + for proxy_url_yaml in proxy_urls_iter { + if proxy_url_yaml.as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid secure reverse proxy target URL value" + ))? 
+ } + } + } else if config["secureProxyTo"].as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid secure reverse proxy target URL value" + ))? + } + } + + if used_properties.contains("enableLoadBalancerHealthCheck") + && config["enableLoadBalancerHealthCheck"].as_bool().is_none() + { + Err(anyhow::anyhow!( + "Invalid load balancer health check enabling option value" + ))? + } + + if used_properties.contains("loadBalancerHealthCheckMaximumFails") { + if let Some(window) = config["loadBalancerHealthCheckMaximumFails"].as_i64() { + if window < 0 { + Err(anyhow::anyhow!( + "Invalid load balancer health check maximum fails value" + ))? + } + } else { + Err(anyhow::anyhow!( + "Invalid load balancer health check maximum fails value" + ))? + } + } + + if used_properties.contains("loadBalancerHealthCheckWindow") { + if !is_global { + Err(anyhow::anyhow!( + "Load balancer health check window configuration is not allowed in host configuration" + ))? + } + if let Some(window) = config["loadBalancerHealthCheckWindow"].as_i64() { + if window < 0 { + Err(anyhow::anyhow!( + "Invalid load balancer health check window value" + ))? + } + } else { + Err(anyhow::anyhow!( + "Invalid load balancer health check window value" + ))? + } + } + + if used_properties.contains("disableProxyCertificateVerification") + && config["disableProxyCertificateVerification"] + .as_bool() + .is_none() + { + Err(anyhow::anyhow!( + "Invalid proxy certificate verification disabling option value" + ))? + } + } + #[cfg(feature = "cache")] + "cache" => { + if used_properties.contains("cacheVaryHeaders") { + if let Some(modules) = config["cacheVaryHeaders"].as_vec() { + let modules_iter = modules.iter(); + for module_name_yaml in modules_iter { + if module_name_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid varying cache header"))? + } + } + } else { + Err(anyhow::anyhow!( + "Invalid varying cache headers configuration" + ))? 
+ } + } + + if used_properties.contains("cacheIgnoreHeaders") { + if let Some(modules) = config["cacheIgnoreHeaders"].as_vec() { + let modules_iter = modules.iter(); + for module_name_yaml in modules_iter { + if module_name_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid ignored cache header"))? + } + } + } else { + Err(anyhow::anyhow!( + "Invalid ignored cache headers configuration" + ))? + } + } + + if used_properties.contains("maximumCacheResponseSize") + && !config["maximumCacheResponseSize"].is_null() + { + if let Some(maximum_cache_response_size) = config["maximumCacheResponseSize"].as_i64() { + if maximum_cache_response_size < 0 { + Err(anyhow::anyhow!("Invalid maximum cache response size"))? + } + } else { + Err(anyhow::anyhow!("Invalid maximum cache response size"))? + } + } + + if used_properties.contains("maximumCacheEntries") { + if !is_global { + Err(anyhow::anyhow!( + "Maximum cache entries configuration is not allowed in host configuration" + ))? + } + if !config["maximumCacheEntries"].is_null() { + if let Some(maximum_cache_response_size) = config["maximumCacheEntries"].as_i64() { + if maximum_cache_response_size < 0 { + Err(anyhow::anyhow!("Invalid maximum cache entries"))? + } + } else { + Err(anyhow::anyhow!("Invalid maximum cache entries"))? + } + } + } + } + #[cfg(feature = "cgi")] + "cgi" => { + if used_properties.contains("cgiScriptExtensions") { + if let Some(cgi_script_extensions) = config["cgiScriptExtensions"].as_vec() { + let cgi_script_extensions_iter = cgi_script_extensions.iter(); + for cgi_script_extension_yaml in cgi_script_extensions_iter { + if cgi_script_extension_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid CGI script extension"))? + } + } + } else { + Err(anyhow::anyhow!( + "Invalid CGI script extension configuration" + ))? 
+ } + } + + if used_properties.contains("cgiScriptInterpreters") { + if let Some(cgi_script_interpreters) = config["cgiScriptInterpreters"].as_hash() { + for (cgi_script_interpreter_extension_unknown, cgi_script_interpreter_params_unknown) in + cgi_script_interpreters.iter() + { + if cgi_script_interpreter_extension_unknown.as_str().is_some() { + if !cgi_script_interpreter_params_unknown.is_null() { + if let Some(cgi_script_interpreter_params) = + cgi_script_interpreter_params_unknown.as_vec() + { + let cgi_script_interpreter_params_iter = cgi_script_interpreter_params.iter(); + for cgi_script_interpreter_param_yaml in cgi_script_interpreter_params_iter { + if cgi_script_interpreter_param_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid CGI script interpreter parameter"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid CGI script interpreter parameters"))? + } + } + } else { + Err(anyhow::anyhow!("Invalid CGI script interpreter extension"))? + } + } + } else { + Err(anyhow::anyhow!( + "Invalid CGI script interpreter configuration" + ))? + } + } + } + #[cfg(feature = "scgi")] + "scgi" => { + if used_properties.contains("scgiTo") && config["scgiTo"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid SCGI target URL value"))? + } + + if used_properties.contains("scgiPath") && config["scgiPath"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid SCGI path"))? + } + } + #[cfg(feature = "fcgi")] + "fcgi" => { + if used_properties.contains("fcgiScriptExtensions") { + if let Some(fastcgi_script_extensions) = config["fcgiScriptExtensions"].as_vec() { + let fastcgi_script_extensions_iter = fastcgi_script_extensions.iter(); + for fastcgi_script_extension_yaml in fastcgi_script_extensions_iter { + if fastcgi_script_extension_yaml.as_str().is_none() { + Err(anyhow::anyhow!("Invalid CGI script extension"))? + } + } + } else { + Err(anyhow::anyhow!( + "Invalid CGI script extension configuration" + ))? 
+ } + } + + if used_properties.contains("fcgiTo") && config["fcgiTo"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid FastCGI target URL value"))? + } + + if used_properties.contains("fcgiPath") && config["fcgiPath"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid FastCGI path"))? + } + } + #[cfg(feature = "fauth")] + "fauth" => { + if used_properties.contains("authTo") && config["authTo"].as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid forwarded authentication target URL value" + ))? + } + + if used_properties.contains("forwardedAuthCopyHeaders") { + if let Some(modules) = config["forwardedAuthCopyHeaders"].as_vec() { + let modules_iter = modules.iter(); + for module_name_yaml in modules_iter { + if module_name_yaml.as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid forwarded authentication response header to copy" + ))? + } + } + } else { + Err(anyhow::anyhow!( + "Invalid forwarded authentication response headers to copy configuration" + ))? + } + } + } + #[cfg(feature = "wsgi")] + "wsgi" => { + if used_properties.contains("wsgiApplicationPath") + && config["wsgiApplicationPath"].as_str().is_none() + { + Err(anyhow::anyhow!("Invalid path to the WSGI application"))? + } + + if used_properties.contains("wsgiPath") && config["wsgiPath"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid WSGI request base path"))? + } + + if used_properties.contains("wsgiClearModuleImportPath") { + if !is_global { + Err(anyhow::anyhow!( + "WSGI Python module import path clearing option is not allowed in host configuration" + ))? + } + if config["wsgiClearModuleImportPath"].as_bool().is_none() { + Err(anyhow::anyhow!( + "Invalid WSGI Python module import path clearing option value" + ))? + } + } + } + #[cfg(feature = "wsgid")] + "wsgid" => { + if used_properties.contains("wsgidApplicationPath") + && config["wsgidApplicationPath"].as_str().is_none() + { + Err(anyhow::anyhow!( + "Invalid path to the WSGI (with pre-forked process pool) application" + ))? 
+ } + + if used_properties.contains("wsgidPath") && config["wsgidPath"].as_str().is_none() { + Err(anyhow::anyhow!( + "Invalid WSGI (with pre-forked process pool) request base path" + ))? + } + } + #[cfg(feature = "asgi")] + "asgi" => { + if used_properties.contains("asgiApplicationPath") + && config["asgiApplicationPath"].as_str().is_none() + { + Err(anyhow::anyhow!("Invalid path to the ASGI application"))? + } + + if used_properties.contains("asgiPath") && config["asgiPath"].as_str().is_none() { + Err(anyhow::anyhow!("Invalid ASGI request base path"))? + } + + if used_properties.contains("asgiClearModuleImportPath") { + if !is_global { + Err(anyhow::anyhow!( + "ASGI Python module import path clearing option is not allowed in host configuration" + ))? + } + if config["asgiClearModuleImportPath"].as_bool().is_none() { + Err(anyhow::anyhow!( + "Invalid ASGI Python module import path clearing option value" + ))? + } + } + } + _ => (), + } + } + + Ok(used_properties.unused()) +} + +pub fn prepare_config_for_validation( + config: &Yaml, +) -> Result, Box> { + let mut vector = Vec::new(); + if let Some(global_config) = config["global"].as_hash() { + let global_config_yaml = Yaml::Hash(global_config.clone()); + vector.push(global_config_yaml); + } + + let mut vector2 = Vec::new(); + let mut vector3 = Vec::new(); + if !config["hosts"].is_badvalue() { + if let Some(hosts) = config["hosts"].as_vec() { + for host in hosts.iter() { + if !host["locations"].is_badvalue() { + if let Some(locations) = host["locations"].as_vec() { + vector3.append(&mut locations.clone()); + } else { + return Err(anyhow::anyhow!("Invalid location configuration").into()); + } + } + } + vector2 = hosts.clone(); + } else { + return Err(anyhow::anyhow!("Invalid virtual host configuration").into()); + } + } + + let iter = vector + .into_iter() + .map(|item| (item, true, false)) + .chain(vector2.into_iter().map(|item| (item, false, false))) + .chain(vector3.into_iter().map(|item| (item, false, true))); + 
+ Ok(iter) +} diff --git a/ferron/src/util/wsgi_error_stream.rs b/ferron/src/util/wsgi_error_stream.rs new file mode 100644 index 0000000000000000000000000000000000000000..49bc94cf911d3f6c568a63b7b02bcd2924518987 --- /dev/null +++ b/ferron/src/util/wsgi_error_stream.rs @@ -0,0 +1,97 @@ +use crate::ferron_common::ErrorLogger; +use pyo3::prelude::*; + +#[pyclass] +pub struct WsgiErrorStream { + error_logger: ErrorLogger, +} + +impl WsgiErrorStream { + pub fn new(error_logger: ErrorLogger) -> Self { + Self { error_logger } + } +} + +#[pymethods] +impl WsgiErrorStream { + fn write(&self, data: &str) -> PyResult { + futures_lite::future::block_on( + self + .error_logger + .log(&format!("There was a WSGI error: {}", data)), + ); + Ok(data.len()) + } + + fn writelines(&self, lines: Vec) -> PyResult<()> { + for line in lines { + // Each `log_blocking` call prints a separate line + futures_lite::future::block_on( + self + .error_logger + .log(&format!("There was a WSGI error: {}", line)), + ); + } + Ok(()) + } + + fn flush(&self) -> PyResult<()> { + // This is a no-op function + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::ferron_common::LogMessage; + use async_channel::{bounded, Receiver}; + + #[test] + fn test_write_logs_error_message() { + let (tx, rx): (async_channel::Sender, Receiver) = bounded(100); + let error_logger = ErrorLogger::new(tx); + let wsgi_error_stream = WsgiErrorStream::new(error_logger); + + let data = "Some error occurred"; + let result = wsgi_error_stream.write(data).unwrap(); + + assert_eq!(result, data.len()); + + // Check if the log message was sent + let log_message = rx.recv_blocking().unwrap(); + let (message, is_error) = log_message.get_message(); + assert_eq!(message, format!("There was a WSGI error: {}", data)); + assert!(is_error); + } + + #[test] + fn test_writelines_logs_multiple_error_messages() { + let (tx, rx): (async_channel::Sender, Receiver) = bounded(100); + let error_logger = ErrorLogger::new(tx); + let 
wsgi_error_stream = WsgiErrorStream::new(error_logger); + + let lines = vec!["Error 1".to_string(), "Error 2".to_string()]; + let result = wsgi_error_stream.writelines(lines.clone()); + + assert!(result.is_ok()); + + // Check if the log messages were sent + for line in lines { + let log_message = rx.recv_blocking().unwrap(); + let (message, is_error) = log_message.get_message(); + assert_eq!(message, format!("There was a WSGI error: {}", line)); + assert!(is_error); + } + } + + #[test] + fn test_flush_no_op() { + let (tx, _rx): (async_channel::Sender, Receiver) = bounded(100); + let error_logger = ErrorLogger::new(tx); + let wsgi_error_stream = WsgiErrorStream::new(error_logger); + + let result = wsgi_error_stream.flush(); + assert!(result.is_ok()); + } +} diff --git a/ferron/src/util/wsgi_input_stream.rs b/ferron/src/util/wsgi_input_stream.rs new file mode 100644 index 0000000000000000000000000000000000000000..1310d1ba682d1296c35bf89bfc35752cb0bffcc8 --- /dev/null +++ b/ferron/src/util/wsgi_input_stream.rs @@ -0,0 +1,172 @@ +use std::pin::Pin; + +use pyo3::prelude::*; +use tokio::io::{AsyncBufRead, AsyncBufReadExt, AsyncReadExt}; + +#[pyclass] +pub struct WsgiInputStream { + body_reader: Pin>, +} + +impl WsgiInputStream { + pub fn new(body_reader: impl AsyncBufRead + Send + Sync + 'static) -> Self { + Self { + body_reader: Box::pin(body_reader), + } + } +} + +#[pymethods] +impl WsgiInputStream { + fn read(&mut self, size: usize) -> PyResult> { + let mut buffer = vec![0u8; size]; + let read_bytes = futures_lite::future::block_on(self.body_reader.read(&mut buffer))?; + Ok(buffer[0..read_bytes].to_vec()) + } + + #[pyo3(signature = (size=-1))] + fn readline(&mut self, size: Option) -> PyResult> { + let mut buffer = Vec::new(); + let size = if size.is_none_or(|s| s < 0) { + None + } else { + size.map(|s| s as usize) + }; + loop { + let reader_buffer = futures_lite::future::block_on(self.body_reader.fill_buf())?.to_vec(); + if reader_buffer.is_empty() { + break; + } + 
if let Some(eol_position) = reader_buffer.iter().position(|&char| char == b'\n') { + buffer.extend_from_slice( + &reader_buffer[0..size.map_or(eol_position + 1, |size| { + std::cmp::min(size, eol_position + 1) + })], + ); + self.body_reader.consume(eol_position + 1); + break; + } else { + buffer.extend_from_slice(&reader_buffer[0..size.unwrap_or(reader_buffer.len())]); + self.body_reader.consume(reader_buffer.len()); + } + } + Ok(buffer) + } + + #[pyo3(signature = (hint=-1))] + fn readlines(&mut self, hint: Option) -> PyResult>> { + let mut total_bytes = 0; + let mut lines = Vec::new(); + let hint = if hint.is_none_or(|s| s < 0) { + None + } else { + hint.map(|s| s as usize) + }; + loop { + let mut line = Vec::new(); + let bytes_read = + futures_lite::future::block_on(self.body_reader.read_until(b'\n', &mut line))?; + if bytes_read == 0 { + break; + } + total_bytes += line.len(); + lines.push(line); + if hint.is_some_and(|hint| hint > total_bytes) { + break; + } + } + Ok(lines) + } + + fn __iter__(this: PyRef<'_, Self>) -> PyRef<'_, Self> { + this + } + + fn __next__(&mut self) -> PyResult>> { + let line = self.readline(None)?; + if line.is_empty() { + // If a "readline()" function in WSGI input stream Python class returns 0 bytes (not even "\n"), it means EOF. 
+ Ok(None) + } else { + Ok(Some(line)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Cursor; + use tokio::io::BufReader; + + fn create_stream(data: &str) -> WsgiInputStream { + let cursor = Cursor::new(data.as_bytes().to_vec()); + let reader = BufReader::new(cursor); + WsgiInputStream::new(reader) + } + + #[test] + fn test_read() { + let mut stream = create_stream("Hello, world!"); + let result = stream.read(5).unwrap(); + assert_eq!(result, b"Hello"); + } + + #[test] + fn test_read_full() { + let mut stream = create_stream("Hello"); + let result = stream.read(10).unwrap(); // try to read more than available + assert_eq!(result, b"Hello"); + } + + #[test] + fn test_readline_no_limit() { + let mut stream = create_stream("line1\nline2\n"); + let result = stream.readline(None).unwrap(); + assert_eq!(result, b"line1\n"); + + let result = stream.readline(None).unwrap(); + assert_eq!(result, b"line2\n"); + } + + #[test] + fn test_readline_with_limit() { + let mut stream = create_stream("line1\nline2\n"); + let result = stream.readline(Some(3)).unwrap(); + assert_eq!(result, b"lin"); // Only 3 bytes + } + + #[test] + fn test_readlines_no_hint() { + let mut stream = create_stream("line1\nline2\nline3\n"); + let result = stream.readlines(None).unwrap(); + assert_eq!(result, vec![b"line1\n", b"line2\n", b"line3\n"]); + } + + #[test] + fn test_readlines_with_hint() { + let mut stream = create_stream("line1\nline2\nline3\n"); + let result = stream.readlines(Some(10)).unwrap(); // Should stop when bytes exceed 10 + let total: usize = result.iter().map(|l| l.len()).sum(); + assert!(total > 0 && total <= 10); + } + + #[test] + fn test_iterator_behavior() { + let mut stream = create_stream("line1\nline2\n"); + + let mut results = Vec::new(); + while let Some(line) = stream.__next__().unwrap() { + results.push(line); + } + + assert_eq!(results, vec![b"line1\n", b"line2\n"]); + } + + #[test] + fn test_iterator_eof() { + let mut stream = create_stream(""); + 
let result = stream.__next__().unwrap(); + assert_eq!(result, None); + } +} diff --git a/ferron/src/util/wsgi_load_application.rs b/ferron/src/util/wsgi_load_application.rs new file mode 100644 index 0000000000000000000000000000000000000000..a398abc4360532cb4f1b941c9c626d8475f3629d --- /dev/null +++ b/ferron/src/util/wsgi_load_application.rs @@ -0,0 +1,59 @@ +use std::error::Error; +use std::ffi::CString; +use std::path::Path; +use std::str::FromStr; + +use pyo3::prelude::*; +use pyo3::types::PyList; + +pub fn load_wsgi_application( + file_path: &Path, + clear_sys_path: bool, +) -> Result, Box> { + let script_dirname = file_path + .parent() + .map(|path| path.to_string_lossy().to_string()); + let script_name = file_path.to_string_lossy().to_string(); + let script_name_cstring = CString::from_str(&script_name)?; + let module_name = script_name + .strip_suffix(".py") + .unwrap_or(&script_name) + .to_lowercase() + .chars() + .map(|c| if c.is_lowercase() { '_' } else { c }) + .collect::(); + let module_name_cstring = CString::from_str(&module_name)?; + let script_data = std::fs::read_to_string(file_path)?; + let script_data_cstring = CString::from_str(&script_data)?; + let wsgi_application = Python::with_gil(move |py| -> PyResult> { + let mut sys_path_old = None; + if let Some(script_dirname) = script_dirname { + if let Ok(sys_module) = PyModule::import(py, "sys") { + if let Ok(sys_path_any) = sys_module.getattr("path") { + if let Ok(sys_path) = sys_path_any.downcast::() { + let sys_path = sys_path.clone(); + sys_path_old = sys_path.extract::>().ok(); + sys_path.insert(0, script_dirname).unwrap_or_default(); + } + } + } + } + let wsgi_application = PyModule::from_code( + py, + &script_data_cstring, + &script_name_cstring, + &module_name_cstring, + )? + .getattr("application")? 
+ .unbind(); + if clear_sys_path { + if let Some(sys_path) = sys_path_old { + if let Ok(sys_module) = PyModule::import(py, "sys") { + sys_module.setattr("path", sys_path).unwrap_or_default(); + } + } + } + Ok(wsgi_application) + })?; + Ok(wsgi_application) +} diff --git a/ferron/src/util/wsgi_structs.rs b/ferron/src/util/wsgi_structs.rs new file mode 100644 index 0000000000000000000000000000000000000000..df6c47a37f1d21a1b4305554fa68e7721ca6e550 --- /dev/null +++ b/ferron/src/util/wsgi_structs.rs @@ -0,0 +1,45 @@ +use pyo3::types::PyAny; +use pyo3::Py; +use std::sync::Arc; + +pub struct WsgiApplicationWrap { + pub domain: Option, + pub ip: Option, + pub wsgi_application: Option>>, + pub wsgi_path: Option, + pub locations: Vec, +} + +impl WsgiApplicationWrap { + pub fn new( + domain: Option, + ip: Option, + wsgi_application: Option>>, + wsgi_path: Option, + locations: Vec, + ) -> Self { + Self { + domain, + ip, + wsgi_application, + wsgi_path, + locations, + } + } +} + +pub struct WsgiApplicationLocationWrap { + pub path: String, + pub wsgi_application: Arc>, + pub wsgi_path: Option, +} + +impl WsgiApplicationLocationWrap { + pub fn new(path: String, wsgi_application: Arc>, wsgi_path: Option) -> Self { + Self { + path, + wsgi_application, + wsgi_path, + } + } +} diff --git a/ferron/src/util/wsgid_body_reader.rs b/ferron/src/util/wsgid_body_reader.rs new file mode 100644 index 0000000000000000000000000000000000000000..3ced02e2648d7ca23105c3021b14fc3ca4e84569 --- /dev/null +++ b/ferron/src/util/wsgid_body_reader.rs @@ -0,0 +1,298 @@ +use std::io::Read; +use std::sync::Arc; + +use bytes::BufMut; +use interprocess::unnamed_pipe::{Recver, Sender}; +use tokio::sync::Mutex; + +use crate::ferron_util::preforked_process_pool::{read_ipc_message, write_ipc_message}; +use crate::ferron_util::wsgid_message_structs::{ + ProcessPoolToServerMessage, ServerToProcessPoolMessage, +}; + +pub struct WsgidBodyReader { + ipc_tx: Arc>, + ipc_rx: Arc>, + buffer: Vec, + finished: bool, +} + 
+impl WsgidBodyReader { + pub fn new(ipc_tx: Arc>, ipc_rx: Arc>) -> Self { + Self { + ipc_tx, + ipc_rx, + buffer: Vec::new(), + finished: false, + } + } +} + +impl Read for WsgidBodyReader { + fn read(&mut self, mut buf: &mut [u8]) -> std::io::Result { + if !self.buffer.is_empty() { + let slice_len = std::cmp::min(buf.remaining_mut(), self.buffer.len()); + buf.put_slice(&self.buffer[0..slice_len]); + self.buffer.clear(); + Ok(slice_len) + } else if self.finished { + Ok(0) + } else { + let rx = &mut self.ipc_rx.blocking_lock(); + let tx = &mut self.ipc_tx.blocking_lock(); + + let mut body_fill_with = Vec::new(); + + loop { + write_ipc_message( + tx, + &postcard::to_allocvec::(&ProcessPoolToServerMessage { + application_id: None, + status_code: None, + headers: None, + body_chunk: None, + error_log_line: None, + error_message: None, + requests_body_chunk: true, + }) + .map_err(|e| std::io::Error::other(e.to_string()))?, + )?; + + let received_message = + postcard::from_bytes::(&read_ipc_message(rx)?) 
+ .map_err(|e| std::io::Error::other(e.to_string()))?; + if let Some(body_error_message) = received_message.body_error_message { + return Err(std::io::Error::other(body_error_message)); + } else if let Some(body_chunk) = received_message.body_chunk { + body_fill_with.extend_from_slice(&body_chunk); + if body_fill_with.len() >= buf.remaining_mut() { + break; + } + } else { + self.finished = true; + break; + } + } + + let slice_len = std::cmp::min(buf.len(), body_fill_with.len()); + buf.put_slice(&body_fill_with[0..slice_len]); + if slice_len < body_fill_with.len() { + self + .buffer + .extend_from_slice(&body_fill_with[slice_len..body_fill_with.len()]); + } + Ok(slice_len) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_read_from_ipc() { + let (tx_inner, mut rx_outer) = interprocess::unnamed_pipe::pipe().unwrap(); + let (mut tx_outer, rx_inner) = interprocess::unnamed_pipe::pipe().unwrap(); + let mut reader = WsgidBodyReader::new( + Arc::new(Mutex::new(tx_inner)), + Arc::new(Mutex::new(rx_inner)), + ); + let input = b"Some data"; + write_ipc_message( + &mut tx_outer, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk: Some(input.to_vec()), + body_error_message: None, + requests_body_chunk: false, + }) + .unwrap(), + ) + .unwrap(); + write_ipc_message( + &mut tx_outer, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk: None, + body_error_message: None, + requests_body_chunk: false, + }) + .unwrap(), + ) + .unwrap(); + + let mut buffer = [0u8; 128]; + let read_bytes = reader.read(&mut buffer).unwrap(); + assert_eq!(read_bytes, input.len()); + assert_eq!(&input[0..read_bytes], input); + + let received_message = + postcard::from_bytes::(&read_ipc_message(&mut rx_outer).unwrap()) + .unwrap(); + assert!(received_message.requests_body_chunk); + let received_message = + 
postcard::from_bytes::(&read_ipc_message(&mut rx_outer).unwrap()) + .unwrap(); + assert!(received_message.requests_body_chunk); + } + + #[test] + fn test_empty_input() { + let (tx_inner, _rx_outer) = interprocess::unnamed_pipe::pipe().unwrap(); + let (mut tx_outer, rx_inner) = interprocess::unnamed_pipe::pipe().unwrap(); + let mut reader = WsgidBodyReader::new( + Arc::new(Mutex::new(tx_inner)), + Arc::new(Mutex::new(rx_inner)), + ); + write_ipc_message( + &mut tx_outer, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk: None, + body_error_message: None, + requests_body_chunk: false, + }) + .unwrap(), + ) + .unwrap(); + + // Simulate receiving no data + let mut buffer = [0u8; 128]; + let read_bytes = reader.read(&mut buffer).unwrap(); + assert_eq!(read_bytes, 0); + } + + #[test] + fn test_multiple_chunks() { + let (tx_inner, _rx_outer) = interprocess::unnamed_pipe::pipe().unwrap(); + let (mut tx_outer, rx_inner) = interprocess::unnamed_pipe::pipe().unwrap(); + let mut reader = WsgidBodyReader::new( + Arc::new(Mutex::new(tx_inner)), + Arc::new(Mutex::new(rx_inner)), + ); + + let input1 = b"First chunk "; + let input2 = b"Second chunk"; + + for chunk in &[input1, input2] { + write_ipc_message( + &mut tx_outer, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk: Some(chunk.to_vec()), + body_error_message: None, + requests_body_chunk: false, + }) + .unwrap(), + ) + .unwrap(); + } + + write_ipc_message( + &mut tx_outer, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk: None, + body_error_message: None, + requests_body_chunk: false, + }) + .unwrap(), + ) + .unwrap(); + + let mut buffer = [0u8; 64]; + let mut total_read = 0; + loop { + let bytes_read = reader.read(&mut buffer[total_read..]).unwrap(); + if bytes_read == 0 { + break; + } + total_read += 
bytes_read; + } + + let expected = [input1.to_owned(), input2.to_owned()].concat(); + assert_eq!(&buffer[..total_read], &expected[..]); + } + + #[test] + fn test_error_message() { + let (tx_inner, _rx_outer) = interprocess::unnamed_pipe::pipe().unwrap(); + let (mut tx_outer, rx_inner) = interprocess::unnamed_pipe::pipe().unwrap(); + let mut reader = WsgidBodyReader::new( + Arc::new(Mutex::new(tx_inner)), + Arc::new(Mutex::new(rx_inner)), + ); + + let error_message = "something went wrong".to_string(); + write_ipc_message( + &mut tx_outer, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk: None, + body_error_message: Some(error_message.clone()), + requests_body_chunk: false, + }) + .unwrap(), + ) + .unwrap(); + + let mut buffer = [0u8; 128]; + let result = reader.read(&mut buffer); + + assert!(result.is_err()); + assert_eq!(result.unwrap_err().to_string(), error_message); + } + + #[test] + fn test_buffering_behavior() { + let (tx_inner, _rx_outer) = interprocess::unnamed_pipe::pipe().unwrap(); + let (mut tx_outer, rx_inner) = interprocess::unnamed_pipe::pipe().unwrap(); + let mut reader = WsgidBodyReader::new( + Arc::new(Mutex::new(tx_inner)), + Arc::new(Mutex::new(rx_inner)), + ); + + let data = b"This is a long chunk of data"; + write_ipc_message( + &mut tx_outer, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk: Some(data.to_vec()), + body_error_message: None, + requests_body_chunk: false, + }) + .unwrap(), + ) + .unwrap(); + write_ipc_message( + &mut tx_outer, + &postcard::to_allocvec(&ServerToProcessPoolMessage { + application_id: None, + environment_variables: None, + body_chunk: None, + body_error_message: None, + requests_body_chunk: false, + }) + .unwrap(), + ) + .unwrap(); + + let mut buf1 = [0u8; 10]; + let mut buf2 = [0u8; 64]; + + let n1 = reader.read(&mut buf1).unwrap(); + assert_eq!(&buf1[..n1], 
&data[..n1]); + + let n2 = reader.read(&mut buf2).unwrap(); + let expected_remainder = &data[n1..]; + assert_eq!(&buf2[..n2], expected_remainder); + } +} diff --git a/ferron/src/util/wsgid_error_stream.rs b/ferron/src/util/wsgid_error_stream.rs new file mode 100644 index 0000000000000000000000000000000000000000..1e959563c350ef8837c75bdae81d5e4a65d7866c --- /dev/null +++ b/ferron/src/util/wsgid_error_stream.rs @@ -0,0 +1,97 @@ +use std::sync::Arc; + +use interprocess::unnamed_pipe::Sender; +use pyo3::prelude::*; +use tokio::sync::Mutex; + +use crate::ferron_util::preforked_process_pool::write_ipc_message; +use crate::ferron_util::wsgid_message_structs::ProcessPoolToServerMessage; + +#[pyclass] +pub struct WsgidErrorStream { + ipc_tx: Arc>, +} + +impl WsgidErrorStream { + pub fn new(ipc_tx: Arc>) -> Self { + Self { ipc_tx } + } +} + +#[pymethods] +impl WsgidErrorStream { + fn write(&self, data: &str) -> PyResult { + write_ipc_message( + &mut self.ipc_tx.blocking_lock(), + &postcard::to_allocvec::(&ProcessPoolToServerMessage { + application_id: None, + status_code: None, + headers: None, + body_chunk: None, + error_log_line: Some(data.to_string()), + error_message: None, + requests_body_chunk: false, + }) + .map_err(|e| anyhow::anyhow!(e.to_string()))?, + )?; + Ok(data.len()) + } + + fn writelines(&self, lines: Vec) -> PyResult<()> { + for line in lines { + // Each `write_ipc_message` call prints a separate line + write_ipc_message( + &mut self.ipc_tx.blocking_lock(), + &postcard::to_allocvec::(&ProcessPoolToServerMessage { + application_id: None, + status_code: None, + headers: None, + body_chunk: None, + error_log_line: Some(line), + error_message: None, + requests_body_chunk: false, + }) + .map_err(|e| anyhow::anyhow!(e.to_string()))?, + )?; + } + Ok(()) + } + + fn flush(&self) -> PyResult<()> { + // This is a no-op function + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::ferron_util::preforked_process_pool::read_ipc_message; + + #[test] + 
fn test_write_sends_correct_data() { + let (tx, mut rx) = interprocess::unnamed_pipe::pipe().unwrap(); + let stream = WsgidErrorStream::new(Arc::new(Mutex::new(tx))); + let input = "error log line"; + let len = stream.write(input).unwrap(); + assert_eq!(len, input.len()); + + let received = read_ipc_message(&mut rx).unwrap(); + let msg: ProcessPoolToServerMessage = postcard::from_bytes(&received).unwrap(); + assert_eq!(msg.error_log_line, Some(input.to_string())); + } + + #[test] + fn test_writelines_sends_each_line() { + let (tx, mut rx) = interprocess::unnamed_pipe::pipe().unwrap(); + let stream = WsgidErrorStream::new(Arc::new(Mutex::new(tx))); + let lines = vec!["line one".into(), "line two".into()]; + stream.writelines(lines.clone()).unwrap(); + + for line in lines { + let received = read_ipc_message(&mut rx).unwrap(); + let msg: ProcessPoolToServerMessage = postcard::from_bytes(&received).unwrap(); + assert_eq!(msg.error_log_line, Some(line.clone())); + } + } +} diff --git a/ferron/src/util/wsgid_input_stream.rs b/ferron/src/util/wsgid_input_stream.rs new file mode 100644 index 0000000000000000000000000000000000000000..f89587a07cc3695d160875253765d00bb4872d9a --- /dev/null +++ b/ferron/src/util/wsgid_input_stream.rs @@ -0,0 +1,169 @@ +use std::io::BufRead; + +use pyo3::prelude::*; + +#[pyclass] +pub struct WsgidInputStream { + body_reader: Box, +} + +impl WsgidInputStream { + pub fn new(body_reader: impl BufRead + Send + Sync + 'static) -> Self { + Self { + body_reader: Box::new(body_reader), + } + } +} + +#[pymethods] +impl WsgidInputStream { + fn read(&mut self, size: usize) -> PyResult> { + let mut buffer = vec![0u8; size]; + let read_bytes = self.body_reader.read(&mut buffer)?; + Ok(buffer[0..read_bytes].to_vec()) + } + + #[pyo3(signature = (size=-1))] + fn readline(&mut self, size: Option) -> PyResult> { + let mut buffer = Vec::new(); + let size = if size.is_none_or(|s| s < 0) { + None + } else { + size.map(|s| s as usize) + }; + loop { + let 
reader_buffer = self.body_reader.fill_buf()?.to_vec(); + if reader_buffer.is_empty() { + break; + } + if let Some(eol_position) = reader_buffer.iter().position(|&char| char == b'\n') { + buffer.extend_from_slice( + &reader_buffer[0..size.map_or(eol_position + 1, |size| { + std::cmp::min(size, eol_position + 1) + })], + ); + self.body_reader.consume(eol_position + 1); + break; + } else { + buffer.extend_from_slice(&reader_buffer[0..size.unwrap_or(reader_buffer.len())]); + self.body_reader.consume(reader_buffer.len()); + } + } + Ok(buffer) + } + + #[pyo3(signature = (hint=-1))] + fn readlines(&mut self, hint: Option) -> PyResult>> { + let mut total_bytes = 0; + let mut lines = Vec::new(); + let hint = if hint.is_none_or(|s| s < 0) { + None + } else { + hint.map(|s| s as usize) + }; + loop { + let mut line = Vec::new(); + let bytes_read = self.body_reader.read_until(b'\n', &mut line)?; + if bytes_read == 0 { + break; + } + total_bytes += line.len(); + lines.push(line); + if hint.is_some_and(|hint| hint > total_bytes) { + break; + } + } + Ok(lines) + } + + fn __iter__(this: PyRef<'_, Self>) -> PyRef<'_, Self> { + this + } + + fn __next__(&mut self) -> PyResult>> { + let line = self.readline(None)?; + if line.is_empty() { + // If a "readline()" function in WSGI input stream Python class returns 0 bytes (not even "\n"), it means EOF. 
+ Ok(None) + } else { + Ok(Some(line)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::{BufReader, Cursor}; + + fn create_stream(data: &str) -> WsgidInputStream { + let cursor = Cursor::new(data.as_bytes().to_vec()); + let reader = BufReader::new(cursor); + WsgidInputStream::new(reader) + } + + #[test] + fn test_read() { + let mut stream = create_stream("Hello, world!"); + let result = stream.read(5).unwrap(); + assert_eq!(result, b"Hello"); + } + + #[test] + fn test_read_full() { + let mut stream = create_stream("Hello"); + let result = stream.read(10).unwrap(); // try to read more than available + assert_eq!(result, b"Hello"); + } + + #[test] + fn test_readline_no_limit() { + let mut stream = create_stream("line1\nline2\n"); + let result = stream.readline(None).unwrap(); + assert_eq!(result, b"line1\n"); + + let result = stream.readline(None).unwrap(); + assert_eq!(result, b"line2\n"); + } + + #[test] + fn test_readline_with_limit() { + let mut stream = create_stream("line1\nline2\n"); + let result = stream.readline(Some(3)).unwrap(); + assert_eq!(result, b"lin"); // Only 3 bytes + } + + #[test] + fn test_readlines_no_hint() { + let mut stream = create_stream("line1\nline2\nline3\n"); + let result = stream.readlines(None).unwrap(); + assert_eq!(result, vec![b"line1\n", b"line2\n", b"line3\n"]); + } + + #[test] + fn test_readlines_with_hint() { + let mut stream = create_stream("line1\nline2\nline3\n"); + let result = stream.readlines(Some(10)).unwrap(); // Should stop when bytes exceed 10 + let total: usize = result.iter().map(|l| l.len()).sum(); + assert!(total > 0 && total <= 10); + } + + #[test] + fn test_iterator_behavior() { + let mut stream = create_stream("line1\nline2\n"); + + let mut results = Vec::new(); + while let Some(line) = stream.__next__().unwrap() { + results.push(line); + } + + assert_eq!(results, vec![b"line1\n", b"line2\n"]); + } + + #[test] + fn test_iterator_eof() { + let mut stream = create_stream(""); + let result = 
stream.__next__().unwrap(); + assert_eq!(result, None); + } +} diff --git a/ferron/src/util/wsgid_message_structs.rs b/ferron/src/util/wsgid_message_structs.rs new file mode 100644 index 0000000000000000000000000000000000000000..0d9abef29c7a32186a34a9b8c7bbc32f7fffb349 --- /dev/null +++ b/ferron/src/util/wsgid_message_structs.rs @@ -0,0 +1,24 @@ +use hashlink::LinkedHashMap; +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct ServerToProcessPoolMessage { + pub application_id: Option, + pub environment_variables: Option>, + #[serde(with = "serde_bytes")] + pub body_chunk: Option>, + pub body_error_message: Option, + pub requests_body_chunk: bool, +} + +#[derive(Serialize, Deserialize)] +pub struct ProcessPoolToServerMessage { + pub application_id: Option, + pub status_code: Option, + pub headers: Option>>, + #[serde(with = "serde_bytes")] + pub body_chunk: Option>, + pub error_log_line: Option, + pub error_message: Option, + pub requests_body_chunk: bool, +} diff --git a/ferron/src/util/wsgid_structs.rs b/ferron/src/util/wsgid_structs.rs new file mode 100644 index 0000000000000000000000000000000000000000..56cd9c7b5f05d03f622f4ec16508c679ad4ea860 --- /dev/null +++ b/ferron/src/util/wsgid_structs.rs @@ -0,0 +1,49 @@ +use std::sync::Arc; + +use super::preforked_process_pool::PreforkedProcessPool; + +pub struct WsgidApplicationWrap { + pub domain: Option, + pub ip: Option, + pub wsgi_process_pool: Option>, + pub wsgi_path: Option, + pub locations: Vec, +} + +impl WsgidApplicationWrap { + pub fn new( + domain: Option, + ip: Option, + wsgi_process_pool: Option>, + wsgi_path: Option, + locations: Vec, + ) -> Self { + Self { + domain, + ip, + wsgi_process_pool, + wsgi_path, + locations, + } + } +} + +pub struct WsgidApplicationLocationWrap { + pub path: String, + pub wsgi_process_pool: Arc, + pub wsgi_path: Option, +} + +impl WsgidApplicationLocationWrap { + pub fn new( + path: String, + wsgi_process_pool: Arc, + wsgi_path: Option, + ) -> 
Self { + Self { + path, + wsgi_process_pool, + wsgi_path, + } + } +} diff --git a/logo-dark.png b/logo-dark.png new file mode 100644 index 0000000000000000000000000000000000000000..768d509ebc0d8ba0062c7de3755482efac680367 Binary files /dev/null and b/logo-dark.png differ diff --git a/logo.png b/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..36614150d636ee36bbe2e2da4dc87b7e60945fd0 Binary files /dev/null and b/logo.png differ diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000000000000000000000000000000000000..a1ed13ed632bbdfd20d9024b5fe08a1e1078a561 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,2 @@ +reorder_imports = true +tab_spaces = 2 \ No newline at end of file diff --git a/wwwroot/index.html b/wwwroot/index.html new file mode 100644 index 0000000000000000000000000000000000000000..bd197d3f41cb6115225536e8543e6858da93db8b --- /dev/null +++ b/wwwroot/index.html @@ -0,0 +1,11 @@ + + + + + + It works! + + +

It works!

+ + \ No newline at end of file