Compare commits
145 Commits
Commits in this comparison (abbreviated SHA1s):

c0357b2890, 4f7f7d6880, efba10f839, 6ba12f35d0, 6a57c23700, 94b85afbc5, cf717032a1, d905de2dad,
c7bd1c98e7, d3302d77d2, df4494c37a, b84189b21b, 9243661f56, bffe97b2b7, bee1dd97ee, 16670e36f5,
5dad663b25, 8375608aaa, 0057377ac6, 078ed65a0e, 9872f0ed1b, fb0cb54776, 67bae1cf2a, eb9ac7fae4,
8046381939, 650f9fd2a4, d4ebc7b5c6, 7a4ccf8e82, 73b40d386a, 3206ce50bb, bdccb866fe, 9b5b382593,
9886c9a8e7, cb3d32cc89, 010eb5270f, e33092530d, e7d649b57f, 5f3d089003, 4322509657, 43990c9dc9,
c03db683a5, 168fd59187, 8bd02d8099, a1db082ec0, 9b9c11e7ab, 274b9d5e94, d888df6382, 011b9a3cbf,
d67a587f3d, 478fc5dd89, a0e7210dff, 16b5dc56f0, 303a6896bf, 9e84528801, 685c228190, febe4d1ac0,
e4f90cd7c1, 3013291ea0, 5d1dce7989, 864f7fa9a5, e54fb3fffc, dddf9f30dc, 3091b5168f, ddc91c2d66,
8072a97f7e, 558155ffaa, ed329c2075, 305c088bb7, debdbfd73c, 904c17c1b3, 4a80bc8988, f9c41ab703,
2112ba22f1, fbe9277f86, d1348e809f, 533613886a, 84f8b786e7, 32bc3e1387, 0fa5914501, 9b790c7bf4,
eda365c21f, 8de1318c9c, 7e566fd655, a80db2ddbc, 0694183ca6, 1f9fb29a9b, eccc69b79c, da108b2d8c,
9d94f55cdc, 94a7058cc6, 3d2e996cea, f2455c9cb1, 427c7dd375, e911a21a93, edabad87d7, 2a65d29e3b,
c837a9b0c6, f7618416b6, 0663e71c52, 0599a6ec8c, b2d36aac19, 3d88ec5992, a693ed1e33, 911a504e16,
56cd0cd1a9, 358ad65d5f, 2f5df6ade0, e3b7be81e7, 9a25e8e810, 1a6b39b829, a419cbbcf3, b97ea1293b,
5f54eb8270, 06161abbbc, aee549f745, 50ec753c05, cf34c7e75c, 572e07a7fd, 4b5270137b, 246230c924,
21416af153, b03312fa2e, bcdbf033b2, 0a054c4a01, eae7ad43d9, 0894ef0089, 954916960b, 91d16b96ee,
4bbadbc764, e4272ac35c, 46ee91c6b7, ad553f8fbb, c0b4129209, fc47e4d584, 32b16439c8, fd27449a26,
3d13301711, 963ec7206b, 9047511256, 4ba907fdcd, dae19c29a0, 25530c8c44, aee44d3af2, 714d83bea1,
e1bfe69b76

`.github/codeql/codeql-config.yml` (vendored, new file, 19 lines)

```yaml
name: "Rust without tests"

disable-default-queries: false

queries:
  - uses: security-extended
  - uses: security-and-quality
  - uses: ./.github/codeql/queries

query-filters:
  - exclude:
      id:
        - rust/unwrap-on-option
        - rust/unwrap-on-result
        - rust/expect-used

analysis:
  dataflow:
    default-precision: high
```

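The excluded query IDs look like CodeQL's checks for panicking `unwrap()`/`expect()` calls on `Option` and `Result`. A minimal illustration of the pattern those queries flag and the panic-free alternative (the function names below are invented for the example, not from telemt):

```rust
use std::net::IpAddr;

// Pattern the excluded queries would flag: panics on malformed input.
fn parse_addr_panicking(raw: &str) -> IpAddr {
    raw.parse().unwrap()
}

// Panic-free alternative: propagate the error to the caller instead.
fn parse_addr(raw: &str) -> Result<IpAddr, std::net::AddrParseError> {
    raw.parse()
}

fn main() {
    assert!(parse_addr("127.0.0.1").is_ok());
    assert!(parse_addr("not-an-ip").is_err());
    // parse_addr_panicking("not-an-ip") would panic here.
    let _ = parse_addr_panicking("127.0.0.1");
}
```
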
`.github/workflows/codeql.yml` (vendored, 4 changed lines)

```diff
@@ -2,9 +2,9 @@ name: "CodeQL Advanced"
 
 on:
   push:
-    branches: [ "main" ]
+    branches: [ "*" ]
   pull_request:
-    branches: [ "main" ]
+    branches: [ "*" ]
   schedule:
     - cron: '0 0 * * 0'
```

`.github/workflows/release.yml` (vendored, new file, 98 lines)

```yaml
name: Release

on:
  push:
    tags:
      - '[0-9]+.[0-9]+.[0-9]+'  # Matches tags like 3.0.0, 3.1.2, etc.
  workflow_dispatch:            # Manual trigger from GitHub Actions UI

permissions:
  contents: read

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    name: Build ${{ matrix.target }}
    runs-on: ubuntu-latest
    permissions:
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - target: x86_64-unknown-linux-gnu
            artifact_name: telemt
            asset_name: telemt-x86_64-linux
          - target: aarch64-unknown-linux-gnu
            artifact_name: telemt
            asset_name: telemt-aarch64-linux

    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Install stable Rust toolchain
        uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1 # v1
        with:
          toolchain: stable
          targets: ${{ matrix.target }}

      - name: Install cross-compilation tools
        run: |
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu

      - name: Cache cargo registry & build artifacts
        uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-${{ matrix.target }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.target }}-cargo-

      - name: Install cross
        run: cargo install cross --git https://github.com/cross-rs/cross

      - name: Build Release
        run: cross build --release --target ${{ matrix.target }}

      - name: Package binary
        run: |
          cd target/${{ matrix.target }}/release
          tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
          sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256

      - name: Upload artifact
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
        with:
          name: ${{ matrix.asset_name }}
          path: |
            target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz
            target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256

  release:
    name: Create Release
    needs: build
    runs-on: ubuntu-latest
    permissions:
      contents: write

    steps:
      - name: Download all artifacts
        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
        with:
          path: artifacts

      - name: Create Release
        uses: softprops/action-gh-release@c95fe1489396fe360a41fb53f90de6ddce8c4c8a # v2.2.1
        with:
          files: artifacts/**/*
          generate_release_notes: true
          draft: false
          prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
```

`.github/workflows/rust.yml` (vendored, 12 changed lines)

```diff
@@ -2,9 +2,9 @@ name: Rust
 
 on:
   push:
-    branches: [ main ]
+    branches: [ "*" ]
   pull_request:
-    branches: [ main ]
+    branches: [ "*" ]
 
 env:
   CARGO_TERM_COLOR: always
@@ -42,5 +42,13 @@ jobs:
       - name: Build Release
         run: cargo build --release --verbose
 
+      - name: Run tests
+        run: cargo test --verbose
+
+      # clippy does not fail on warnings because telemt is under active development
+      # and still produces many warnings
+      - name: Run clippy
+        run: cargo clippy -- --cap-lints warn
+
       - name: Check for unused dependencies
         run: cargo udeps || true
```

`.kilocode/rules-architect/AGENTS.md` (new file, 58 lines)

# Architect Mode Rules for Telemt

## Architecture Overview

```mermaid
graph TB
    subgraph Entry
        Client[Clients] --> Listener[TCP/Unix Listener]
    end

    subgraph Proxy Layer
        Listener --> ClientHandler[ClientHandler]
        ClientHandler --> Handshake[Handshake Validator]
        Handshake --> |Valid| Relay[Relay Layer]
        Handshake --> |Invalid| Masking[Masking/TLS Fronting]
    end

    subgraph Transport
        Relay --> MiddleProxy[Middle-End Proxy Pool]
        Relay --> DirectRelay[Direct DC Relay]
        MiddleProxy --> TelegramDC[Telegram DCs]
        DirectRelay --> TelegramDC
    end
```

## Module Dependencies
- [`src/main.rs`](src/main.rs) - Entry point, spawns all async tasks
- [`src/config/`](src/config/) - Configuration loading with auto-migration
- [`src/error.rs`](src/error.rs) - Error types, must be used by all modules
- [`src/crypto/`](src/crypto/) - AES, SHA, random number generation
- [`src/protocol/`](src/protocol/) - MTProto constants, frame encoding, obfuscation
- [`src/stream/`](src/stream/) - Stream wrappers, buffer pool, frame codecs
- [`src/proxy/`](src/proxy/) - Client handling, handshake, relay logic
- [`src/transport/`](src/transport/) - Upstream management, middle-proxy, SOCKS support
- [`src/stats/`](src/stats/) - Statistics and replay protection
- [`src/ip_tracker.rs`](src/ip_tracker.rs) - Per-user IP tracking

## Key Architectural Constraints

### Middle-End Proxy Mode
- Requires public IP on interface OR 1:1 NAT with STUN probing
- Uses separate `proxy-secret` from Telegram (NOT user secrets)
- Falls back to direct mode automatically on STUN mismatch

### TLS Fronting
- Invalid handshakes are transparently proxied to `mask_host`
- This is critical for DPI evasion - do not change this behavior
- `mask_unix_sock` and `mask_host` are mutually exclusive

### Stream Architecture
- Buffer pool is shared globally via Arc - prevents allocation storms
- Frame codecs implement tokio-util Encoder/Decoder traits
- State machine in [`src/stream/state.rs`](src/stream/state.rs) manages stream transitions

### Configuration Migration
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config in-place
- New fields must have sensible defaults
- DC203 override is auto-injected for CDN/media support

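The diagram's split between valid and invalid handshakes is the core of the design: a valid MTProto client goes to the relay layer, while an invalid one is forwarded untouched to `mask_host`. A minimal sketch of that dispatch shape, assuming hypothetical type and function names (telemt's real `HandshakeResult` lives in `src/error.rs`):

```rust
use tokio::io::{copy_bidirectional, AsyncRead, AsyncWrite};
use tokio::net::TcpStream;

// Hypothetical outcome type for illustration only.
enum Handshake<S> {
    // Valid MTProto client: hand the stream to the relay layer.
    Valid { client: S, dc: u16 },
    // Invalid handshake: the untouched stream must be forwarded to mask_host.
    Invalid { client: S },
}

async fn dispatch<S>(outcome: Handshake<S>, mask_host: &str) -> std::io::Result<()>
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    match outcome {
        Handshake::Valid { client, dc } => relay_to_dc(client, dc).await,
        Handshake::Invalid { mut client } => {
            // TLS fronting: proxy the raw bytes to the mask host so a scanner
            // sees an ordinary TLS server instead of a proxy.
            let mut upstream = TcpStream::connect((mask_host, 443)).await?;
            copy_bidirectional(&mut client, &mut upstream).await?;
            Ok(())
        }
    }
}

async fn relay_to_dc<S>(_client: S, _dc: u16) -> std::io::Result<()> {
    // Stand-in for the relay layer described in the diagram.
    Ok(())
}
```
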
`.kilocode/rules-code/AGENTS.md` (new file, 23 lines)

# Code Mode Rules for Telemt

## Error Handling
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns streams on bad client - these MUST be returned for masking, never dropped
- Use [`Recoverable`](src/error.rs:110) trait to check if errors are retryable

## Configuration Changes
- [`ProxyConfig::load()`](src/config/mod.rs:641) auto-mutates config - new fields should have defaults
- DC203 override is auto-injected if missing - do not remove this behavior
- When adding config fields, add migration logic in [`ProxyConfig::load()`](src/config/mod.rs:641)

## Crypto Code
- [`SecureRandom`](src/crypto/random.rs) from [`src/crypto/random.rs`](src/crypto/random.rs) must be used for all crypto operations
- Never use `rand::thread_rng()` directly - use the shared `Arc<SecureRandom>`

## Stream Handling
- Buffer pool [`BufferPool`](src/stream/buffer_pool.rs) is shared via Arc - always use it instead of allocating
- Frame codecs in [`src/stream/frame_codec.rs`](src/stream/frame_codec.rs) implement tokio-util's Encoder/Decoder traits

## Testing
- Tests are inline in modules using `#[cfg(test)]`
- Use `cargo test --lib <module_name>` to run tests for specific modules

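A minimal sketch of the inline-test convention described under Testing; the module and function below are invented for illustration and are not taken from telemt:

```rust
// Illustrative function only; real modules live under src/.
pub fn parse_dc_id(raw: &str) -> Option<u16> {
    raw.trim()
        .parse()
        .ok()
        .filter(|dc| (1..=5).contains(dc) || *dc == 203)
}

#[cfg(test)]
mod tests {
    use super::*;

    // Runs with `cargo test --lib <module_name>` as described above.
    #[test]
    fn accepts_known_dc_ids() {
        assert_eq!(parse_dc_id("2"), Some(2));
        assert_eq!(parse_dc_id(" 203 "), Some(203));
    }

    #[test]
    fn rejects_garbage() {
        assert_eq!(parse_dc_id("abc"), None);
        assert_eq!(parse_dc_id("42"), None);
    }
}
```
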
`.kilocode/rules-debug/AGENTS.md` (new file, 27 lines)

# Debug Mode Rules for Telemt

## Logging
- `RUST_LOG` environment variable takes absolute priority over all config log levels
- Log levels: `trace`, `debug`, `info`, `warn`, `error`
- Use `RUST_LOG=debug cargo run` for detailed operational logs
- Use `RUST_LOG=trace cargo run` for full protocol-level debugging

## Middle-End Proxy Debugging
- Set `ME_DIAG=1` environment variable for high-precision cryptography diagnostics
- STUN probe results are logged at startup - check for mismatch between local and reflected IP
- If Middle-End fails, check `proxy_secret_path` points to a valid file from https://core.telegram.org/getProxySecret

## Connection Issues
- DC connectivity is logged at startup with RTT measurements
- If DC ping fails, check `dc_overrides` for custom addresses
- Use `prefer_ipv6=false` in config if IPv6 is unreliable

## TLS Fronting Issues
- Invalid handshakes are proxied to `mask_host` - check this host is reachable
- `mask_unix_sock` and `mask_host` are mutually exclusive - only one can be set
- If `mask_unix_sock` is set, socket must exist before connections arrive

## Common Errors
- `ReplayAttack` - client replayed a handshake nonce, potential attack
- `TimeSkew` - client clock is off, can disable with `ignore_time_skew=true`
- `TgHandshakeTimeout` - upstream DC connection failed, check network

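The "`RUST_LOG` wins over the config log level" behavior matches the usual `tracing-subscriber` `EnvFilter` pattern (the crate and its `env-filter` feature are declared in Cargo.toml). A sketch of how that precedence is typically wired; this is an assumption about the mechanism, not telemt's exact initialization code:

```rust
use tracing_subscriber::EnvFilter;

// Build the log filter: RUST_LOG wins when set, otherwise fall back to the
// level coming from the config file (here just a string for illustration).
fn init_logging(config_level: &str) {
    let filter = EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| EnvFilter::new(config_level));

    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .init();
}

fn main() {
    // With `RUST_LOG=trace` in the environment this logs at trace level,
    // regardless of the "info" passed from the config.
    init_logging("info");
    tracing::info!("logging initialized");
}
```
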
`AGENTS.md` (new file, 40 lines)

# AGENTS.md

**Use the general system prompt from AGENTS_SYSTEM_PROMT.md.**
**Additional techniques and architecture details are here.**

This file provides guidance to agents when working with code in this repository.

## Build & Test Commands
```bash
cargo build --release              # Production build
cargo test                         # Run all tests
cargo test --lib error             # Run tests for specific module (error module)
cargo bench --bench crypto_bench   # Run crypto benchmarks
cargo clippy -- -D warnings        # Lint with clippy
```

## Project-Specific Conventions

### Rust Edition
- Uses **Rust edition 2024** (not 2021) - specified in Cargo.toml

### Error Handling Pattern
- Custom [`Recoverable`](src/error.rs:110) trait distinguishes recoverable vs fatal errors
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns streams on bad client for masking - do not drop them
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations

### Configuration Auto-Migration
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config with defaults and migrations
- DC203 override is auto-injected if missing (required for CDN/media)
- `show_link` top-level migrates to `general.links.show`

### Middle-End Proxy Requirements
- Requires public IP on interface OR 1:1 NAT with STUN probing
- Falls back to direct mode on STUN/interface mismatch unless `stun_iface_mismatch_ignore=true`
- Proxy-secret from Telegram is separate from user secrets

### TLS Fronting Behavior
- Invalid handshakes are transparently proxied to `mask_host` for DPI evasion
- `fake_cert_len` is randomized at startup (1024-4096 bytes)
- `mask_unix_sock` and `mask_host` are mutually exclusive

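The Error Handling Pattern section says the `Recoverable` trait separates retryable from fatal errors. A hedged sketch of how such a retry gate is commonly used; the trait shape, method name and error type below are assumptions made for the example, the real definition is at `src/error.rs:110`:

```rust
use std::time::Duration;

// Assumed shape only; the real trait is defined in src/error.rs:110.
trait Recoverable {
    fn is_recoverable(&self) -> bool;
}

#[derive(Debug)]
enum ConnectError {
    Timeout,
    BadSecret,
}

impl Recoverable for ConnectError {
    fn is_recoverable(&self) -> bool {
        // Transient network failures can be retried; auth failures cannot.
        matches!(self, ConnectError::Timeout)
    }
}

async fn connect_with_retry<F, Fut, T, E>(mut attempt: F, max_tries: u32) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
    E: Recoverable,
{
    let mut tries = 0;
    loop {
        match attempt().await {
            Ok(v) => return Ok(v),
            Err(e) if e.is_recoverable() && tries + 1 < max_tries => {
                tries += 1;
                tokio::time::sleep(Duration::from_millis(250)).await;
            }
            // Fatal error or retry budget exhausted: surface it to the caller.
            Err(e) => return Err(e),
        }
    }
}
```
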
`AGENTS_SYSTEM_PROMT.md` (new file, 207 lines)

## System Prompt — Production Rust Codebase: Modification and Architecture Guidelines

You are a senior Rust systems engineer acting as a strict code reviewer and implementation partner. Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.

---

### 0. Priority Resolution — Scope Control

This section resolves conflicts between code quality enforcement and scope limitation.

When editing or extending existing code, you MUST audit the affected files and fix:

- Comment style violations (missing, non-English, decorative, trailing).
- Missing or incorrect documentation on public items.
- Comment placement issues (trailing comments → move above the code).

These are **coordinated changes** — they are always in scope.

The following changes are FORBIDDEN without explicit user approval:

- Renaming types, traits, functions, modules, or variables.
- Altering business logic, control flow, or data transformations.
- Changing module boundaries, architectural layers, or public API surface.
- Adding or removing functions, structs, enums, or trait implementations.
- Fixing compiler warnings or removing unused code.

If such issues are found during your work, list them under a `## ⚠️ Out-of-scope observations` section at the end of your response. Include file path, context, and a brief description. Do not apply these changes.

The user can override this behavior with explicit commands:

- `"Do not modify existing code"` — touch only what was requested, skip coordinated fixes.
- `"Make minimal changes"` — no coordinated fixes, narrowest possible diff.
- `"Fix everything"` — apply all coordinated fixes and out-of-scope observations.

---

### 1. Comments and Documentation

- All comments MUST be written in English.
- Write only comments that add technical value: architecture decisions, intent, invariants, non-obvious implementation details.
- Place all comments on separate lines above the relevant code.
- Use `///` doc-comments for public items. Use `//` for internal clarifications.

Correct example:

```rust
// Handles MTProto client authentication and establishes encrypted session state.
fn handle_authenticated_client(...) { ... }
```

Incorrect examples:

```rust
let x = 5; // set x to 5
```

```rust
// This function does stuff
fn do_stuff() { ... }
```

---

### 2. File Size and Module Structure

- Files MUST NOT exceed 350–550 lines.
- If a file exceeds this limit, split it into submodules organized by responsibility (e.g., protocol, transport, state, handlers).
- Parent modules MUST declare and describe their submodules.
- Maintain clear architectural boundaries between modules.

Correct example:

```rust
// Client connection handling logic.
// Submodules:
// - handshake: MTProto handshake implementation
// - relay: traffic forwarding logic
// - state: client session state machine

pub mod handshake;
pub mod relay;
pub mod state;
```

Git discipline:

- Use local git for versioning and diffs.
- Write clear, descriptive commit messages in English that explain both *what* changed and *why*.

---

### 3. Formatting

- Preserve the existing formatting style of the project exactly as-is.
- Reformat code only when explicitly instructed to do so.
- Do not run `cargo fmt` unless explicitly instructed.

---

### 4. Change Safety and Validation

- If anything is unclear, STOP and ask specific, targeted questions before proceeding.
- List exactly what is ambiguous and offer possible interpretations for the user to choose from.
- Prefer clarification over assumptions. Do not guess intent, behavior, or missing requirements.
- Actively ask questions before making architectural or behavioral changes.

---

### 5. Warnings and Unused Code

- Leave all warnings, unused variables, functions, imports, and dead code untouched unless explicitly instructed to modify them.
- These may be intentional or part of work-in-progress code.
- `todo!()` and `unimplemented!()` are permitted and should not be removed or replaced unless explicitly instructed.

---

### 6. Architectural Integrity

- Preserve existing architecture unless explicitly instructed to refactor.
- Do not introduce hidden behavioral changes.
- Do not introduce implicit refactors.
- Keep changes minimal, isolated, and intentional.

---

### 7. When Modifying Code

You MUST:

- Maintain architectural consistency with the existing codebase.
- Document non-obvious logic with comments that describe *why*, not *what*.
- Limit changes strictly to the requested scope (plus coordinated fixes per Section 0).
- Keep all existing symbol names unless renaming is explicitly requested.
- Preserve global formatting as-is.

You MUST NOT:

- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write full, working implementation. If the implementation is unclear, ask first.
- Refactor code outside the requested scope.
- Make speculative improvements.

Note: `todo!()` and `unimplemented!()` are allowed as idiomatic Rust markers for genuinely unfinished code paths.

---

### 8. Decision Process for Complex Changes

When facing a non-trivial modification, follow this sequence:

1. **Clarify**: Restate the task in one sentence to confirm understanding.
2. **Assess impact**: Identify which modules, types, and invariants are affected.
3. **Propose**: Describe the intended change before implementing it.
4. **Implement**: Make the minimal, isolated change.
5. **Verify**: Explain why the change preserves existing behavior and architectural integrity.

---

### 9. Context Awareness

- When provided with partial code, assume the rest of the codebase exists and functions correctly unless stated otherwise.
- Reference existing types, functions, and module structures by their actual names as shown in the provided code.
- When the provided context is insufficient to make a safe change, request the missing context explicitly.

---

### 10. Response Format

#### Language Policy

- Code, comments, commit messages, documentation: **English**.
- Reasoning and explanations in response text: **Russian**.

#### Response Structure

Your response MUST consist of two sections:

**Section 1: `## Reasoning` (in Russian)**

- What needs to be done and why.
- Which files and modules are affected.
- Architectural decisions and their rationale.
- Potential risks or side effects.

**Section 2: `## Changes`**

- For each modified or created file: the filename on a separate line in backticks, followed by the code block.
- For files **under 200 lines**: return the full file with all changes applied.
- For files **over 200 lines**: return only the changed functions/blocks with at least 3 lines of surrounding context above and below. If the user requests the full file, provide it.
- New files: full file content.
- End with a suggested git commit message in English.

#### Reporting Out-of-Scope Issues

If during modification you discover issues outside the requested scope (potential bugs, unsafe code, architectural concerns, missing error handling, unused imports, dead code):

- Do not fix them silently.
- List them under `## ⚠️ Out-of-scope observations` at the end of your response.
- Include: file path, line/function context, brief description of the issue, and severity estimate.

#### Splitting Protocol

If the response exceeds the output limit:

1. End the current part with: **SPLIT: PART N — CONTINUE? (remaining: file_list)**
2. List the files that will be provided in subsequent parts.
3. Wait for user confirmation before continuing.
4. No single file may be split across parts.

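Section 1 of the prompt requires `///` doc-comments on public items but only shows `//` examples. A small illustration of the intended doc-comment style; the constant and function are invented for this example and are not taken from telemt:

```rust
/// Maximum number of handshake bytes buffered before the client is
/// classified as either a valid MTProto peer or traffic to be masked.
///
/// The value is illustrative only.
pub const HANDSHAKE_BUFFER_LIMIT: usize = 1024;

/// Returns `true` when `len` still fits into the handshake buffer.
pub fn fits_handshake_buffer(len: usize) -> bool {
    len <= HANDSHAKE_BUFFER_LIMIT
}
```
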
`Cargo.lock` (generated, 79 changed lines)

```diff
@@ -437,6 +437,12 @@ version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
 
+[[package]]
+name = "foldhash"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
+
 [[package]]
 name = "form_urlencoded"
 version = "1.2.2"
@@ -585,6 +591,25 @@ dependencies = [
  "wasip3",
 ]
 
+[[package]]
+name = "h2"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http",
+ "indexmap",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
 [[package]]
 name = "half"
 version = "2.7.1"
@@ -608,9 +633,7 @@ version = "0.15.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
 dependencies = [
- "allocator-api2",
- "equivalent",
- "foldhash",
+ "foldhash 0.1.5",
 ]
 
 [[package]]
@@ -618,6 +641,11 @@ name = "hashbrown"
 version = "0.16.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
+dependencies = [
+ "allocator-api2",
+ "equivalent",
+ "foldhash 0.2.0",
+]
 
 [[package]]
 name = "heck"
@@ -685,6 +713,12 @@ version = "1.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87"
 
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
 [[package]]
 name = "hyper"
 version = "1.8.1"
@@ -695,9 +729,11 @@ dependencies = [
  "bytes",
  "futures-channel",
  "futures-core",
+ "h2",
  "http",
  "http-body",
  "httparse",
+ "httpdate",
  "itoa",
  "pin-project-lite",
  "pin-utils",
@@ -999,11 +1035,11 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
 
 [[package]]
 name = "lru"
-version = "0.12.5"
+version = "0.16.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
+checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593"
 dependencies = [
- "hashbrown 0.15.5",
+ "hashbrown 0.16.1",
 ]
 
 [[package]]
@@ -1057,6 +1093,25 @@ dependencies = [
  "windows-sys 0.61.2",
 ]
 
+[[package]]
+name = "num-bigint"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
+dependencies = [
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "num-traits"
 version = "0.2.19"
@@ -1714,7 +1769,7 @@ dependencies = [
 
 [[package]]
 name = "telemt"
-version = "1.2.0"
+version = "3.0.0"
 dependencies = [
  "aes",
  "base64",
@@ -1729,9 +1784,15 @@ dependencies = [
  "futures",
  "hex",
  "hmac",
+ "http-body-util",
+ "httpdate",
+ "hyper",
+ "hyper-util",
 "libc",
 "lru",
 "md-5",
+ "num-bigint",
+ "num-traits",
 "parking_lot",
 "proptest",
 "rand",
@@ -1899,8 +1960,12 @@ checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098"
 dependencies = [
  "bytes",
  "futures-core",
+ "futures-io",
  "futures-sink",
+ "futures-util",
+ "hashbrown 0.15.5",
  "pin-project-lite",
+ "slab",
  "tokio",
 ]
```

`Cargo.toml` (14 changed lines)

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "telemt"
-version = "1.2.0"
+version = "3.0.4"
 edition = "2024"
 
 [dependencies]
@@ -9,7 +9,7 @@ libc = "0.2"
 
 # Async runtime
 tokio = { version = "1.42", features = ["full", "tracing"] }
-tokio-util = { version = "0.7", features = ["codec"] }
+tokio-util = { version = "0.7", features = ["full"] }
 
 # Crypto
 aes = "0.8"
@@ -37,7 +37,7 @@ tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
 parking_lot = "0.12"
 dashmap = "5.5"
-lru = "0.12"
+lru = "0.16"
 rand = "0.9"
 chrono = { version = "0.4", features = ["serde"] }
 hex = "0.4"
@@ -45,9 +45,15 @@ base64 = "0.22"
 url = "2.5"
 regex = "1.11"
 crossbeam-queue = "0.3"
+num-bigint = "0.4"
+num-traits = "0.2"
 
 # HTTP
 reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
+hyper = { version = "1", features = ["server", "http1"] }
+hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
+http-body-util = "0.1"
+httpdate = "1.0"
 
 [dev-dependencies]
 tokio-test = "0.4"
@@ -57,4 +63,4 @@ futures = "0.3"
 
 [[bench]]
 name = "crypto_bench"
 harness = false
```

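The new `hyper`, `hyper-util`, `http-body-util`, and `httpdate` dependencies point at an embedded HTTP endpoint (the config exposes a `metrics_port = 9090`). A minimal sketch of serving HTTP/1 with exactly these crates and features, assuming nothing about telemt's actual metrics handler; the address, path, and response body below are placeholders:

```rust
use std::convert::Infallible;

use http_body_util::Full;
use hyper::body::Bytes;
use hyper::service::service_fn;
use hyper::{Request, Response};
use hyper_util::rt::{TokioExecutor, TokioIo};
use hyper_util::server::conn::auto::Builder;
use tokio::net::TcpListener;

// Placeholder handler: always returns a fixed body.
async fn metrics(
    _req: Request<hyper::body::Incoming>,
) -> Result<Response<Full<Bytes>>, Infallible> {
    Ok(Response::new(Full::new(Bytes::from("# metrics placeholder\n"))))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:9090").await?;
    loop {
        let (stream, _) = listener.accept().await?;
        let io = TokioIo::new(stream);
        // One lightweight task per connection; errors are ignored here.
        tokio::spawn(async move {
            let _ = Builder::new(TokioExecutor::new())
                .serve_connection(io, service_fn(metrics))
                .await;
        });
    }
}
```
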
`Dockerfile` (new file, 43 lines)

```dockerfile
# ==========================
# Stage 1: Build
# ==========================
FROM rust:1.85-slim-bookworm AS builder

RUN apt-get update && apt-get install -y --no-install-recommends \
    pkg-config \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build

COPY Cargo.toml Cargo.lock* ./
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
    cargo build --release 2>/dev/null || true && \
    rm -rf src

COPY . .
RUN cargo build --release && strip target/release/telemt

# ==========================
# Stage 2: Runtime
# ==========================
FROM debian:bookworm-slim

RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

RUN useradd -r -s /usr/sbin/nologin telemt

WORKDIR /app

COPY --from=builder /build/target/release/telemt /app/telemt
COPY config.toml /app/config.toml

RUN chown -R telemt:telemt /app
USER telemt

EXPOSE 443
EXPOSE 9090

ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]
```

`README.md` (138 changed lines)

````diff
@@ -2,29 +2,59 @@
 
 **Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as connection pooling, replay protection, detailed statistics, masking from "prying" eyes
 
-## Emergency
-**Важное сообщение для пользователей из России**
-
-Мы работаем над проектом с Нового года и сейчас готовим новый релиз - 1.2
-В нём имплементируется поддержка Middle Proxy Protocol - основного терминатора для Ad Tag:
-работа над ним идёт с 6 ферваля, а уже 10 февраля произошли "громкие события"...
-Если у вас есть компетенции в асинхронных сетевых приложениях - мы открыты к предложениям и pull requests
-
-**Important message for users from Russia**
-
-We've been working on the project since December 30 and are currently preparing a new release – 1.2
-It implements support for the Middle Proxy Protocol – the primary point for the Ad Tag:
-development on it started on February 6th, and by February 10th, "big activity" in Russia had already "taken place"...
-If you have expertise in asynchronous network applications – we are open to ideas and pull requests!
+## NEWS and EMERGENCY
+### ✈️ Telemt 3 is released!
+<table>
+<tr>
+<td width="50%" valign="top">
+
+### 🇷🇺 RU
+
+18 февраля мы опубликовали `telemt 3.0.3`, он имеет:
+
+- улучшенный механизм Middle-End Health Check
+- высокоскоростное восстановление инициализации Middle-End
+- меньше задержек на hot-path
+- более корректную работу в Dualstack, а именно - IPv6 Middle-End
+- аккуратное переподключение клиента без дрифта сессий между Middle-End
+- автоматическая деградация на Direct-DC при массовой (>2 ME-DC-групп) недоступности Middle-End
+- автодетект IP за NAT, при возможности - будет выполнен хендшейк с ME, при неудаче - автодеградация
+- единственный известный специальный DC=203 уже добавлен в код: медиа загружаются с CDN в Direct-DC режиме
+
+[Здесь вы можете найти релиз](https://github.com/telemt/telemt/releases/tag/3.0.3)
+
+Если у вас есть компетенции в асинхронных сетевых приложениях, анализе трафика, реверс-инжиниринге или сетевых расследованиях - мы открыты к идеям и pull requests!
+
+</td>
+<td width="50%" valign="top">
+
+### 🇬🇧 EN
+
+On February 18, we released `telemt 3.0.3`. This version introduces:
+
+- improved Middle-End Health Check method
+- high-speed recovery of Middle-End init
+- reduced latency on the hot path
+- correct Dualstack support: proper handling of IPv6 Middle-End
+- *clean* client reconnection without session "drift" between Middle-End
+- automatic degradation to Direct-DC mode in case of large-scale (>2 ME-DC groups) Middle-End unavailability
+- automatic public IP detection behind NAT; first - Middle-End handshake is performed, otherwise automatic degradation is applied
+- known special DC=203 is now handled natively: media is delivered from the CDN via Direct-DC mode
+
+[Release is available here](https://github.com/telemt/telemt/releases/tag/3.0.3)
+
+If you have expertise in asynchronous network applications, traffic analysis, reverse engineering, or network forensics - we welcome ideas and pull requests!
+
+</td>
+</tr>
+</table>
 
 # Features
-💥 The configuration structure has changed since version 1.1.0.0, change it in your environment!
+💥 The configuration structure has changed since version 1.1.0.0. change it in your environment!
 
 ⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, advanced and *almost* **"behaviorally consistent to real"**: we are confident we have it right - [see evidence on our validation and traces](#recognizability-for-dpi-and-crawler)
 
+⚓ Our ***Middle-End Pool*** is fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: non dramatically, but usual
+
 # GOTO
 - [Features](#features)
@@ -44,7 +74,9 @@ If you have expertise in asynchronous network applications – we are open to id
 - [Telegram Calls](#telegram-calls-via-mtproxy)
 - [DPI](#how-does-dpi-see-mtproxy-tls)
 - [Whitelist on Network Level](#whitelist-on-ip)
+- [Too many open files](#too-many-open-files)
 - [Build](#build)
+- [Docker](#docker)
 - [Why Rust?](#why-rust)
 
 ## Features
@@ -128,6 +160,7 @@ Type=simple
 WorkingDirectory=/bin
 ExecStart=/bin/telemt /etc/telemt.toml
 Restart=on-failure
+LimitNOFILE=65536
 
 [Install]
 WantedBy=multi-user.target
@@ -143,17 +176,20 @@ then Ctrl+X -> Y -> Enter to save
 ## Configuration
 ### Minimal Configuration for First Start
 ```toml
-# === UI ===
-# Users to show in the startup log (tg:// links)
-show_link = ["hello"]
-
 # === General Settings ===
 [general]
+# prefer_ipv6 is deprecated; use [network].prefer
 prefer_ipv6 = false
 fast_mode = true
 use_middle_proxy = false
 # ad_tag = "..."
 
+[network]
+ipv4 = true
+ipv6 = true   # set false to disable, omit for auto
+prefer = 4    # 4 or 6
+multipath = false
+
 [general.modes]
 classic = false
 secure = false
@@ -170,11 +206,19 @@ listen_addr_ipv6 = "::"
 # Listen on multiple interfaces/IPs (overrides listen_addr_*)
 [[server.listeners]]
 ip = "0.0.0.0"
-# announce_ip = "1.2.3.4"   # Optional: Public IP for tg:// links
+# announce = "my.hostname.tld"   # Optional: hostname for tg:// links
+# OR
+# announce = "1.2.3.4"           # Optional: Public IP for tg:// links
 
 [[server.listeners]]
 ip = "::"
 
+# Users to show in the startup log (tg:// links)
+[general.links]
+show = ["hello"]   # Users to show in the startup log (tg:// links)
+# public_host = "proxy.example.com"   # Host (IP or domain) for tg:// links
+# public_port = 443                   # Port for tg:// links (default: server.port)
+
 # === Timeouts (in seconds) ===
 [timeouts]
 client_handshake = 15
@@ -222,6 +266,10 @@ weight = 10
 # address = "127.0.0.1:9050"
 # enabled = false
 # weight = 1
+
+# === DC Address Overrides ===
+# [dc_overrides]
+# "203" = "91.105.192.100:443"
 ```
 ### Advanced
 #### Adtag
@@ -377,6 +425,23 @@ Keep-Alive: timeout=60
 - in China behind the Great Firewall
 - in Russia on mobile networks, less in wired networks
 - in Iran during "activity"
+
+### Too many open files
+- On a fresh Linux install the default open file limit is low; under load `telemt` may fail with `Accept error: Too many open files`
+- **Systemd**: add `LimitNOFILE=65536` to the `[Service]` section (already included in the example above)
+- **Docker**: add `--ulimit nofile=65536:65536` to your `docker run` command, or in `docker-compose.yml`:
+```yaml
+ulimits:
+  nofile:
+    soft: 65536
+    hard: 65536
+```
+- **System-wide** (optional): add to `/etc/security/limits.conf`:
+```
+* soft nofile 1048576
+* hard nofile 1048576
+root soft nofile 1048576
+root hard nofile 1048576
+```
 
 
 ## Build
@@ -395,9 +460,44 @@ chmod +x /bin/telemt
 telemt config.toml
 ```
 
+## Docker
+**Quick start (Docker Compose)**
+
+1. Edit `config.toml` in repo root (at least: port, users secrets, tls_domain)
+2. Start container:
+```bash
+docker compose up -d --build
+```
+3. Check logs:
+```bash
+docker compose logs -f telemt
+```
+4. Stop:
+```bash
+docker compose down
+```
+
+**Notes**
+- `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
+- By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
+- If you really need host networking (usually only for some IPv6 setups) uncomment `network_mode: host`
+
+**Run without Compose**
+```bash
+docker build -t telemt:local .
+docker run --name telemt --restart unless-stopped \
+  -p 443:443 \
+  -e RUST_LOG=info \
+  -v "$PWD/config.toml:/app/config.toml:ro" \
+  --read-only \
+  --cap-drop ALL --cap-add NET_BIND_SERVICE \
+  --ulimit nofile=65536:65536 \
+  telemt:local
+```
+
 ## Why Rust?
 - Long-running reliability and idempotent behavior
-- Rust’s deterministic resource management - RAII
+- Rust's deterministic resource management - RAII
 - No garbage collector
 - Memory safety and reduced attack surface
 - Tokio's asynchronous architecture
````

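Beyond the systemd, Docker, and `limits.conf` options the README adds, a process can also raise its own soft `RLIMIT_NOFILE` up to the hard limit at startup. A Linux-oriented sketch using the `libc` crate (already a dependency); the README does not claim telemt does this, it is offered only as an illustration of the same limit:

```rust
// Raise RLIMIT_NOFILE's soft limit to the hard limit at startup.
// Sketch only: the README fixes this via systemd/Docker, not in code.
fn raise_nofile_limit() -> std::io::Result<u64> {
    unsafe {
        let mut rl = libc::rlimit { rlim_cur: 0, rlim_max: 0 };
        if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rl) != 0 {
            return Err(std::io::Error::last_os_error());
        }
        rl.rlim_cur = rl.rlim_max;
        if libc::setrlimit(libc::RLIMIT_NOFILE, &rl) != 0 {
            return Err(std::io::Error::last_os_error());
        }
        Ok(rl.rlim_cur)
    }
}

fn main() {
    match raise_nofile_limit() {
        Ok(limit) => println!("open file limit raised to {limit}"),
        Err(e) => eprintln!("could not raise open file limit: {e}"),
    }
}
```
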
`ROADMAP.md` (new file, 34 lines)

### 3.0.0 Anschluss
- **Middle Proxy is now stable**, confirmed on canary-deploy over ~20 users
- Ad-tag is now working
- DC=203/CDN is now working over ME
- `getProxyConfig` and `ProxySecret` are automated
- Versioning now follows the `3.0.0` format - without Windows-style "microfixes"

### 3.0.1 Kabelsammler
- Handshake timeouts fixed
- Connectivity logging refactored
- Docker: tmpfs for ProxyConfig and ProxySecret
- Public Host and Port in config
- ME Relays Head-of-Line Blocking fixed
- ME Ping

### 3.0.2 Microtrencher
- New `[network]` section
- ME fixes
- Small bug fixes

### 3.0.3 Ausrutscher
- ME as stateful, no conn-id migration
- No `flush()` on datapath after RpcWriter
- High-speed parser for IPv6 without regexp
- `nat_probe = true` by default
- Timeout for `recv()` in STUN-client
- ConnRegistry review
- Dualstack emergency reconnect

### 3.0.4 Schneeflecken
- Only WARN and Links in Normal log
- Consistent IP-family detection
- Includes for config
- `nonce_frame_hex` in log only with `DEBUG`

`config.toml` (32 changed lines)

```diff
@@ -1,13 +1,18 @@
-# === UI ===
-# Users to show in the startup log (tg:// links)
-show_link = ["hello"]
 
 # === General Settings ===
 [general]
+# prefer_ipv6 is deprecated; use [network].prefer instead
 prefer_ipv6 = false
 fast_mode = true
 use_middle_proxy = true
-ad_tag = "00000000000000000000000000000000"
+#ad_tag = "00000000000000000000000000000000"
+
+[network]
+# Enable/disable families; ipv6 = true/false/auto(None)
+ipv4 = true
+ipv6 = true
+# prefer = 4 or 6
+prefer = 4
+multipath = false
 
 # Log level: debug | verbose | normal | silent
 # Can be overridden with --silent or --log-level CLI flags
@@ -24,6 +29,8 @@ tls = true
 port = 443
 listen_addr_ipv4 = "0.0.0.0"
 listen_addr_ipv6 = "::"
+# listen_unix_sock = "/var/run/telemt.sock"   # Unix socket
+# listen_unix_sock_perm = "0666"              # Socket file permissions
 # metrics_port = 9090
 # metrics_whitelist = ["127.0.0.1", "::1"]
 
@@ -35,6 +42,12 @@ ip = "0.0.0.0"
 [[server.listeners]]
 ip = "::"
 
+# Users to show in the startup log (tg:// links)
+[general.links]
+show = ["hello"]   # Users to show in the startup log (tg:// links)
+# public_host = "proxy.example.com"   # Host (IP or domain) for tg:// links
+# public_port = 443                   # Port for tg:// links (default: server.port)
+
 # === Timeouts (in seconds) ===
 [timeouts]
 client_handshake = 15
@@ -64,6 +77,9 @@ hello = "00000000000000000000000000000000"
 # [access.user_max_tcp_conns]
 # hello = 50
 
+# [access.user_max_unique_ips]
+# hello = 5
+
 # [access.user_data_quota]
 # hello = 1073741824  # 1 GB
 
@@ -77,4 +93,8 @@ weight = 10
 # type = "socks5"
 # address = "127.0.0.1:1080"
 # enabled = false
 # weight = 1
+
+# === DC Address Overrides ===
+# [dc_overrides]
+# "203" = "91.105.192.100:443"
```

`docker-compose.yml` (new file, 29 lines)

```yaml
services:
  telemt:
    build: .
    container_name: telemt
    restart: unless-stopped
    ports:
      - "443:443"
      - "9090:9090"
    # Allow caching 'proxy-secret' in read-only container
    working_dir: /run/telemt
    volumes:
      - ./config.toml:/run/telemt/config.toml:ro
    tmpfs:
      - /run/telemt:rw,mode=1777,size=1m
    environment:
      - RUST_LOG=info
    # Uncomment this line if you want to use host network for IPv6, but bridge is default and usually better
    # network_mode: host
    cap_drop:
      - ALL
    cap_add:
      - NET_BIND_SERVICE  # allow binding to port 443
    read_only: true
    security_opt:
      - no-new-privileges:true
    ulimits:
      nofile:
        soft: 65536
        hard: 65536
```

```diff
@@ -189,11 +189,18 @@ r#"# Telemt MTProxy — auto-generated config
 show_link = ["{username}"]
 
 [general]
+# prefer_ipv6 is deprecated; use [network].prefer
 prefer_ipv6 = false
 fast_mode = true
 use_middle_proxy = false
 log_level = "normal"
 
+[network]
+ipv4 = true
+ipv6 = true
+prefer = 4
+multipath = false
+
 [general.modes]
 classic = false
 secure = false
@@ -297,4 +304,4 @@ fn print_links(username: &str, secret: &str, port: u16, domain: &str) {
     println!("The proxy will auto-detect and display the correct link on startup.");
     println!("Check: journalctl -u telemt.service | head -30");
     println!("===================");
 }
```

105
src/config/defaults.rs
Normal file
105
src/config/defaults.rs
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
use std::net::IpAddr;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use serde::Deserialize;
|
||||||
|
|
||||||
|
// Helper defaults kept private to the config module.
|
||||||
|
pub(crate) fn default_true() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_port() -> u16 {
|
||||||
|
443
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_tls_domain() -> String {
|
||||||
|
"www.google.com".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_mask_port() -> u16 {
|
||||||
|
443
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_fake_cert_len() -> usize {
|
||||||
|
2048
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_replay_check_len() -> usize {
|
||||||
|
65_536
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_replay_window_secs() -> u64 {
|
||||||
|
1800
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_handshake_timeout() -> u64 {
|
||||||
|
15
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_connect_timeout() -> u64 {
|
||||||
|
10
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_keepalive() -> u64 {
|
||||||
|
60
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_ack_timeout() -> u64 {
|
||||||
|
300
|
||||||
|
}
|
||||||
|
pub(crate) fn default_me_one_retry() -> u8 {
|
||||||
|
3
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_one_timeout() -> u64 {
|
||||||
|
1500
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_listen_addr() -> String {
|
||||||
|
"0.0.0.0".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_weight() -> u16 {
|
||||||
|
1
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_metrics_whitelist() -> Vec<IpAddr> {
|
||||||
|
vec!["127.0.0.1".parse().unwrap(), "::1".parse().unwrap()]
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_prefer_4() -> u8 {
|
||||||
|
4
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
|
||||||
|
Some("unknown-dc.txt".to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Custom deserializer helpers
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(untagged)]
|
||||||
|
pub(crate) enum OneOrMany {
|
||||||
|
One(String),
|
||||||
|
Many(Vec<String>),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn deserialize_dc_overrides<'de, D>(
|
||||||
|
deserializer: D,
|
||||||
|
) -> std::result::Result<HashMap<String, Vec<String>>, D::Error>
|
||||||
|
where
|
||||||
|
D: serde::de::Deserializer<'de>,
|
||||||
|
{
|
||||||
|
let raw: HashMap<String, OneOrMany> = HashMap::deserialize(deserializer)?;
|
||||||
|
let mut out = HashMap::new();
|
||||||
|
for (dc, val) in raw {
|
||||||
|
let mut addrs = match val {
|
||||||
|
OneOrMany::One(s) => vec![s],
|
||||||
|
OneOrMany::Many(v) => v,
|
||||||
|
};
|
||||||
|
addrs.retain(|s| !s.trim().is_empty());
|
||||||
|
if !addrs.is_empty() {
|
||||||
|
out.insert(dc, addrs);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(out)
|
||||||
|
}
|
||||||
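The default_* helpers above are meant to back #[serde(default = "...")] attributes on the config structs. A small illustrative sketch of that mechanism, assuming default_true and default_port are in scope within this crate and the toml crate is available (both appear elsewhere in this diff); the Demo struct is hypothetical.

// Illustrative only: serde calls the named helper when a field is missing.
use serde::Deserialize;

#[derive(Deserialize)]
struct Demo {
    #[serde(default = "default_true")]
    fast_mode: bool,
    #[serde(default = "default_port")]
    port: u16,
}

fn main() {
    // An empty document exercises every default.
    let d: Demo = toml::from_str("").unwrap();
    assert!(d.fast_mode);
    assert_eq!(d.port, 443);
}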
295 src/config/load.rs Normal file
@@ -0,0 +1,295 @@
use std::collections::HashMap;
use std::net::IpAddr;
use std::path::Path;

use rand::Rng;
use tracing::warn;
use serde::{Serialize, Deserialize};

use crate::error::{ProxyError, Result};

use super::defaults::*;
use super::types::*;

fn validate_network_cfg(net: &mut NetworkConfig) -> Result<()> {
    if !net.ipv4 && matches!(net.ipv6, Some(false)) {
        return Err(ProxyError::Config(
            "Both ipv4 and ipv6 are disabled in [network]".to_string(),
        ));
    }

    if net.prefer != 4 && net.prefer != 6 {
        return Err(ProxyError::Config(
            "network.prefer must be 4 or 6".to_string(),
        ));
    }

    if !net.ipv4 && net.prefer == 4 {
        warn!("prefer=4 but ipv4=false; forcing prefer=6");
        net.prefer = 6;
    }

    if matches!(net.ipv6, Some(false)) && net.prefer == 6 {
        warn!("prefer=6 but ipv6=false; forcing prefer=4");
        net.prefer = 4;
    }

    Ok(())
}

// ============= Main Config =============

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ProxyConfig {
    #[serde(default)]
    pub general: GeneralConfig,

    #[serde(default)]
    pub network: NetworkConfig,

    #[serde(default)]
    pub server: ServerConfig,

    #[serde(default)]
    pub timeouts: TimeoutsConfig,

    #[serde(default)]
    pub censorship: AntiCensorshipConfig,

    #[serde(default)]
    pub access: AccessConfig,

    #[serde(default)]
    pub upstreams: Vec<UpstreamConfig>,

    #[serde(default)]
    pub show_link: ShowLink,

    /// DC address overrides for non-standard DCs (CDN, media, test, etc.)
    /// Keys are DC indices as strings, values are one or more "ip:port" addresses.
    /// Matches the C implementation's `proxy_for <dc_id> <ip>:<port>` config directive.
    /// Example in config.toml:
    /// [dc_overrides]
    /// "203" = ["149.154.175.100:443", "91.105.192.100:443"]
    #[serde(default, deserialize_with = "deserialize_dc_overrides")]
    pub dc_overrides: HashMap<String, Vec<String>>,

    /// Default DC index (1-5) for unmapped non-standard DCs.
    /// Matches the C implementation's `default <dc_id>` config directive.
    /// If not set, defaults to 2 (matching Telegram's official `default 2;` in proxy-multi.conf).
    #[serde(default)]
    pub default_dc: Option<u8>,
}

impl ProxyConfig {
    pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
        let content =
            std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;

        let mut config: ProxyConfig =
            toml::from_str(&content).map_err(|e| ProxyError::Config(e.to_string()))?;

        // Validate secrets.
        for (user, secret) in &config.access.users {
            if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 {
                return Err(ProxyError::InvalidSecret {
                    user: user.clone(),
                    reason: "Must be 32 hex characters".to_string(),
                });
            }
        }

        // Validate tls_domain.
        if config.censorship.tls_domain.is_empty() {
            return Err(ProxyError::Config("tls_domain cannot be empty".to_string()));
        }

        // Validate mask_unix_sock.
        if let Some(ref sock_path) = config.censorship.mask_unix_sock {
            if sock_path.is_empty() {
                return Err(ProxyError::Config(
                    "mask_unix_sock cannot be empty".to_string(),
                ));
            }
            #[cfg(unix)]
            if sock_path.len() > 107 {
                return Err(ProxyError::Config(format!(
                    "mask_unix_sock path too long: {} bytes (max 107)",
                    sock_path.len()
                )));
            }
            #[cfg(not(unix))]
            return Err(ProxyError::Config(
                "mask_unix_sock is only supported on Unix platforms".to_string(),
            ));

            if config.censorship.mask_host.is_some() {
                return Err(ProxyError::Config(
                    "mask_unix_sock and mask_host are mutually exclusive".to_string(),
                ));
            }
        }

        // Default mask_host to tls_domain if not set and no unix socket configured.
        if config.censorship.mask_host.is_none() && config.censorship.mask_unix_sock.is_none() {
            config.censorship.mask_host = Some(config.censorship.tls_domain.clone());
        }

        // Migration: prefer_ipv6 -> network.prefer.
        if config.general.prefer_ipv6 {
            if config.network.prefer == 4 {
                config.network.prefer = 6;
            }
            warn!("prefer_ipv6 is deprecated, use [network].prefer = 6");
        }

        // Auto-enable NAT probe when Middle Proxy is requested.
        if config.general.use_middle_proxy && !config.general.middle_proxy_nat_probe {
            config.general.middle_proxy_nat_probe = true;
            warn!("Auto-enabled middle_proxy_nat_probe for middle proxy mode");
        }

        validate_network_cfg(&mut config.network)?;

        // Random fake_cert_len.
        config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);

        // Resolve listen_tcp: explicit value wins, otherwise auto-detect.
        // If unix socket is set → TCP only when listen_addr_ipv4 or listeners are explicitly provided.
        // If no unix socket → TCP always (backward compat).
        let listen_tcp = config.server.listen_tcp.unwrap_or_else(|| {
            if config.server.listen_unix_sock.is_some() {
                // Unix socket present: TCP only if user explicitly set addresses or listeners.
                config.server.listen_addr_ipv4.is_some()
                    || !config.server.listeners.is_empty()
            } else {
                true
            }
        });

        // Migration: Populate listeners if empty (skip when listen_tcp = false).
        if config.server.listeners.is_empty() && listen_tcp {
            let ipv4_str = config.server.listen_addr_ipv4
                .as_deref()
                .unwrap_or("0.0.0.0");
            if let Ok(ipv4) = ipv4_str.parse::<IpAddr>() {
                config.server.listeners.push(ListenerConfig {
                    ip: ipv4,
                    announce: None,
                    announce_ip: None,
                });
            }
            if let Some(ipv6_str) = &config.server.listen_addr_ipv6 {
                if let Ok(ipv6) = ipv6_str.parse::<IpAddr>() {
                    config.server.listeners.push(ListenerConfig {
                        ip: ipv6,
                        announce: None,
                        announce_ip: None,
                    });
                }
            }
        }

        // Migration: announce_ip → announce for each listener.
        for listener in &mut config.server.listeners {
            if listener.announce.is_none() && listener.announce_ip.is_some() {
                listener.announce = Some(listener.announce_ip.unwrap().to_string());
            }
        }

        // Migration: show_link (top-level) → general.links.show.
        if !config.show_link.is_empty() && config.general.links.show.is_empty() {
            config.general.links.show = config.show_link.clone();
        }

        // Migration: Populate upstreams if empty (Default Direct).
        if config.upstreams.is_empty() {
            config.upstreams.push(UpstreamConfig {
                upstream_type: UpstreamType::Direct { interface: None },
                weight: 1,
                enabled: true,
            });
        }

        // Ensure default DC203 override is present.
        config
            .dc_overrides
            .entry("203".to_string())
            .or_insert_with(|| vec!["91.105.192.100:443".to_string()]);

        Ok(config)
    }

    pub fn validate(&self) -> Result<()> {
        if self.access.users.is_empty() {
            return Err(ProxyError::Config("No users configured".to_string()));
        }

        if !self.general.modes.classic && !self.general.modes.secure && !self.general.modes.tls {
            return Err(ProxyError::Config("No modes enabled".to_string()));
        }

        if self.censorship.tls_domain.contains(' ') || self.censorship.tls_domain.contains('/') {
            return Err(ProxyError::Config(format!(
                "Invalid tls_domain: '{}'. Must be a valid domain name",
                self.censorship.tls_domain
            )));
        }

        if let Some(tag) = &self.general.ad_tag {
            let zeros = "00000000000000000000000000000000";
            if tag == zeros {
                warn!("ad_tag is all zeros; register a valid proxy tag via @MTProxybot to enable sponsored channel");
            }
            if tag.len() != 32 || tag.chars().any(|c| !c.is_ascii_hexdigit()) {
                warn!("ad_tag is not a 32-char hex string; ensure you use value issued by @MTProxybot");
            }
        }

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn dc_overrides_allow_string_and_array() {
        let toml = r#"
[dc_overrides]
"201" = "149.154.175.50:443"
"202" = ["149.154.167.51:443", "149.154.175.100:443"]
"#;
        let cfg: ProxyConfig = toml::from_str(toml).unwrap();
        assert_eq!(cfg.dc_overrides["201"], vec!["149.154.175.50:443"]);
        assert_eq!(
            cfg.dc_overrides["202"],
            vec!["149.154.167.51:443", "149.154.175.100:443"]
        );
    }

    #[test]
    fn dc_overrides_inject_dc203_default() {
        let toml = r#"
[general]
use_middle_proxy = false

[censorship]
tls_domain = "example.com"

[access.users]
user = "00000000000000000000000000000000"
"#;
        let dir = std::env::temp_dir();
        let path = dir.join("telemt_dc_override_test.toml");
        std::fs::write(&path, toml).unwrap();
        let cfg = ProxyConfig::load(&path).unwrap();
        assert!(cfg
            .dc_overrides
            .get("203")
            .map(|v| v.contains(&"91.105.192.100:443".to_string()))
            .unwrap_or(false));
        let _ = std::fs::remove_file(path);
    }
}
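A hedged usage sketch of the loading entry point defined above: callers are expected to pair load() with validate(). It assumes ProxyConfig and the crate's Result alias are in scope; "config.toml" is an illustrative path, and the helper name init_config is not part of the diff.

// Hedged usage sketch (not from the diff): load, then validate.
fn init_config() -> Result<ProxyConfig> {
    let cfg = ProxyConfig::load("config.toml")?;
    cfg.validate()?;
    Ok(cfg)
}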
@@ -1,518 +1,8 @@
-//! Configuration
+//! Configuration.
 
+pub(crate) mod defaults;
+mod types;
+mod load;
+
+pub use load::ProxyConfig;
+pub use types::*;
-use crate::error::{ProxyError, Result};
-use chrono::{DateTime, Utc};
-use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
-use std::net::{IpAddr, SocketAddr};
-use std::path::Path;
-
-// ============= Helper Defaults =============
-
-fn default_true() -> bool {
-    true
-}
-fn default_port() -> u16 {
-    443
-}
-fn default_tls_domain() -> String {
-    "www.google.com".to_string()
-}
-fn default_mask_port() -> u16 {
-    443
-}
-fn default_replay_check_len() -> usize {
-    65536
-}
-fn default_replay_window_secs() -> u64 {
-    1800
-}
-fn default_handshake_timeout() -> u64 {
-    15
-}
-fn default_connect_timeout() -> u64 {
-    10
-}
-fn default_keepalive() -> u64 {
-    60
-}
-fn default_ack_timeout() -> u64 {
-    300
-}
-fn default_listen_addr() -> String {
-    "0.0.0.0".to_string()
-}
-fn default_fake_cert_len() -> usize {
-    2048
-}
-fn default_weight() -> u16 {
-    1
-}
-fn default_metrics_whitelist() -> Vec<IpAddr> {
-    vec!["127.0.0.1".parse().unwrap(), "::1".parse().unwrap()]
-}
-
-// ============= Log Level =============
-
-/// Logging verbosity level
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
-#[serde(rename_all = "lowercase")]
-pub enum LogLevel {
-    /// All messages including trace (trace + debug + info + warn + error)
-    Debug,
-    /// Detailed operational logs (debug + info + warn + error)
-    Verbose,
-    /// Standard operational logs (info + warn + error)
-    #[default]
-    Normal,
-    /// Minimal output: only warnings and errors (warn + error).
-    /// Startup messages (config, DC connectivity, proxy links) are always shown
-    /// via info! before the filter is applied.
-    Silent,
-}
-
-impl LogLevel {
-    /// Convert to tracing EnvFilter directive string
-    pub fn to_filter_str(&self) -> &'static str {
-        match self {
-            LogLevel::Debug => "trace",
-            LogLevel::Verbose => "debug",
-            LogLevel::Normal => "info",
-            LogLevel::Silent => "warn",
-        }
-    }
-
-    /// Parse from a loose string (CLI argument)
-    pub fn from_str_loose(s: &str) -> Self {
-        match s.to_lowercase().as_str() {
-            "debug" | "trace" => LogLevel::Debug,
-            "verbose" => LogLevel::Verbose,
-            "normal" | "info" => LogLevel::Normal,
-            "silent" | "quiet" | "error" | "warn" => LogLevel::Silent,
-            _ => LogLevel::Normal,
-        }
-    }
-}
-
-impl std::fmt::Display for LogLevel {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            LogLevel::Debug => write!(f, "debug"),
-            LogLevel::Verbose => write!(f, "verbose"),
-            LogLevel::Normal => write!(f, "normal"),
-            LogLevel::Silent => write!(f, "silent"),
-        }
-    }
-}
-
-// ============= Sub-Configs =============
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ProxyModes {
-    #[serde(default)]
-    pub classic: bool,
-    #[serde(default)]
-    pub secure: bool,
-    #[serde(default = "default_true")]
-    pub tls: bool,
-}
-
-impl Default for ProxyModes {
-    fn default() -> Self {
-        Self {
-            classic: true,
-            secure: true,
-            tls: true,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct GeneralConfig {
-    #[serde(default)]
-    pub modes: ProxyModes,
-
-    #[serde(default)]
-    pub prefer_ipv6: bool,
-
-    #[serde(default = "default_true")]
-    pub fast_mode: bool,
-
-    #[serde(default)]
-    pub use_middle_proxy: bool,
-
-    #[serde(default)]
-    pub ad_tag: Option<String>,
-
-    /// Path to proxy-secret binary file (auto-downloaded if absent).
-    /// Infrastructure secret from https://core.telegram.org/getProxySecret
-    #[serde(default)]
-    pub proxy_secret_path: Option<String>,
-
-    /// Public IP override for middle-proxy NAT environments.
-    /// When set, this IP is used in ME key derivation and RPC_PROXY_REQ "our_addr".
-    #[serde(default)]
-    pub middle_proxy_nat_ip: Option<IpAddr>,
-
-    /// Enable STUN-based NAT probing to discover public IP:port for ME KDF.
-    #[serde(default)]
-    pub middle_proxy_nat_probe: bool,
-
-    /// Optional STUN server address (host:port) for NAT probing.
-    #[serde(default)]
-    pub middle_proxy_nat_stun: Option<String>,
-
-    #[serde(default)]
-    pub log_level: LogLevel,
-}
-
-impl Default for GeneralConfig {
-    fn default() -> Self {
-        Self {
-            modes: ProxyModes::default(),
-            prefer_ipv6: false,
-            fast_mode: true,
-            use_middle_proxy: false,
-            ad_tag: None,
-            proxy_secret_path: None,
-            middle_proxy_nat_ip: None,
-            middle_proxy_nat_probe: false,
-            middle_proxy_nat_stun: None,
-            log_level: LogLevel::Normal,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ServerConfig {
-    #[serde(default = "default_port")]
-    pub port: u16,
-
-    #[serde(default = "default_listen_addr")]
-    pub listen_addr_ipv4: String,
-
-    #[serde(default)]
-    pub listen_addr_ipv6: Option<String>,
-
-    #[serde(default)]
-    pub listen_unix_sock: Option<String>,
-
-    #[serde(default)]
-    pub metrics_port: Option<u16>,
-
-    #[serde(default = "default_metrics_whitelist")]
-    pub metrics_whitelist: Vec<IpAddr>,
-
-    #[serde(default)]
-    pub listeners: Vec<ListenerConfig>,
-}
-
-impl Default for ServerConfig {
-    fn default() -> Self {
-        Self {
-            port: default_port(),
-            listen_addr_ipv4: default_listen_addr(),
-            listen_addr_ipv6: Some("::".to_string()),
-            listen_unix_sock: None,
-            metrics_port: None,
-            metrics_whitelist: default_metrics_whitelist(),
-            listeners: Vec::new(),
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct TimeoutsConfig {
-    #[serde(default = "default_handshake_timeout")]
-    pub client_handshake: u64,
-
-    #[serde(default = "default_connect_timeout")]
-    pub tg_connect: u64,
-
-    #[serde(default = "default_keepalive")]
-    pub client_keepalive: u64,
-
-    #[serde(default = "default_ack_timeout")]
-    pub client_ack: u64,
-}
-
-impl Default for TimeoutsConfig {
-    fn default() -> Self {
-        Self {
-            client_handshake: default_handshake_timeout(),
-            tg_connect: default_connect_timeout(),
-            client_keepalive: default_keepalive(),
-            client_ack: default_ack_timeout(),
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct AntiCensorshipConfig {
-    #[serde(default = "default_tls_domain")]
-    pub tls_domain: String,
-
-    #[serde(default = "default_true")]
-    pub mask: bool,
-
-    #[serde(default)]
-    pub mask_host: Option<String>,
-
-    #[serde(default = "default_mask_port")]
-    pub mask_port: u16,
-
-    #[serde(default)]
-    pub mask_unix_sock: Option<String>,
-
-    #[serde(default = "default_fake_cert_len")]
-    pub fake_cert_len: usize,
-}
-
-impl Default for AntiCensorshipConfig {
-    fn default() -> Self {
-        Self {
-            tls_domain: default_tls_domain(),
-            mask: true,
-            mask_host: None,
-            mask_port: default_mask_port(),
-            mask_unix_sock: None,
-            fake_cert_len: default_fake_cert_len(),
-        }
-    }
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct AccessConfig {
-    #[serde(default)]
-    pub users: HashMap<String, String>,
-
-    #[serde(default)]
-    pub user_max_tcp_conns: HashMap<String, usize>,
-
-    #[serde(default)]
-    pub user_expirations: HashMap<String, DateTime<Utc>>,
-
-    #[serde(default)]
-    pub user_data_quota: HashMap<String, u64>,
-
-    #[serde(default = "default_replay_check_len")]
-    pub replay_check_len: usize,
-
-    #[serde(default = "default_replay_window_secs")]
-    pub replay_window_secs: u64,
-
-    #[serde(default)]
-    pub ignore_time_skew: bool,
-}
-
-impl Default for AccessConfig {
-    fn default() -> Self {
-        let mut users = HashMap::new();
-        users.insert(
-            "default".to_string(),
-            "00000000000000000000000000000000".to_string(),
-        );
-        Self {
-            users,
-            user_max_tcp_conns: HashMap::new(),
-            user_expirations: HashMap::new(),
-            user_data_quota: HashMap::new(),
-            replay_check_len: default_replay_check_len(),
-            replay_window_secs: default_replay_window_secs(),
-            ignore_time_skew: false,
-        }
-    }
-}
-
-// ============= Aux Structures =============
-
-#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
-#[serde(tag = "type", rename_all = "lowercase")]
-pub enum UpstreamType {
-    Direct {
-        #[serde(default)]
-        interface: Option<String>,
-    },
-    Socks4 {
-        address: String,
-        #[serde(default)]
-        interface: Option<String>,
-        #[serde(default)]
-        user_id: Option<String>,
-    },
-    Socks5 {
-        address: String,
-        #[serde(default)]
-        interface: Option<String>,
-        #[serde(default)]
-        username: Option<String>,
-        #[serde(default)]
-        password: Option<String>,
-    },
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct UpstreamConfig {
-    #[serde(flatten)]
-    pub upstream_type: UpstreamType,
-    #[serde(default = "default_weight")]
-    pub weight: u16,
-    #[serde(default = "default_true")]
-    pub enabled: bool,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ListenerConfig {
-    pub ip: IpAddr,
-    #[serde(default)]
-    pub announce_ip: Option<IpAddr>,
-}
-
-// ============= Main Config =============
-
-#[derive(Debug, Clone, Serialize, Deserialize, Default)]
-pub struct ProxyConfig {
-    #[serde(default)]
-    pub general: GeneralConfig,
-
-    #[serde(default)]
-    pub server: ServerConfig,
-
-    #[serde(default)]
-    pub timeouts: TimeoutsConfig,
-
-    #[serde(default)]
-    pub censorship: AntiCensorshipConfig,
-
-    #[serde(default)]
-    pub access: AccessConfig,
-
-    #[serde(default)]
-    pub upstreams: Vec<UpstreamConfig>,
-
-    #[serde(default)]
-    pub show_link: Vec<String>,
-
-    /// DC address overrides for non-standard DCs (CDN, media, test, etc.)
-    /// Keys are DC indices as strings, values are "ip:port" addresses.
-    /// Matches the C implementation's `proxy_for <dc_id> <ip>:<port>` config directive.
-    /// Example in config.toml:
-    /// [dc_overrides]
-    /// "203" = "149.154.175.100:443"
-    #[serde(default)]
-    pub dc_overrides: HashMap<String, String>,
-
-    /// Default DC index (1-5) for unmapped non-standard DCs.
-    /// Matches the C implementation's `default <dc_id>` config directive.
-    /// If not set, defaults to 2 (matching Telegram's official `default 2;` in proxy-multi.conf).
-    #[serde(default)]
-    pub default_dc: Option<u8>,
-}
-
-impl ProxyConfig {
-    pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
-        let content =
-            std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;
-
-        let mut config: ProxyConfig =
-            toml::from_str(&content).map_err(|e| ProxyError::Config(e.to_string()))?;
-
-        // Validate secrets
-        for (user, secret) in &config.access.users {
-            if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 {
-                return Err(ProxyError::InvalidSecret {
-                    user: user.clone(),
-                    reason: "Must be 32 hex characters".to_string(),
-                });
-            }
-        }
-
-        // Validate tls_domain
-        if config.censorship.tls_domain.is_empty() {
-            return Err(ProxyError::Config("tls_domain cannot be empty".to_string()));
-        }
-
-        // Validate mask_unix_sock
-        if let Some(ref sock_path) = config.censorship.mask_unix_sock {
-            if sock_path.is_empty() {
-                return Err(ProxyError::Config(
-                    "mask_unix_sock cannot be empty".to_string(),
-                ));
-            }
-            #[cfg(unix)]
-            if sock_path.len() > 107 {
-                return Err(ProxyError::Config(format!(
-                    "mask_unix_sock path too long: {} bytes (max 107)",
-                    sock_path.len()
-                )));
-            }
-            #[cfg(not(unix))]
-            return Err(ProxyError::Config(
-                "mask_unix_sock is only supported on Unix platforms".to_string(),
-            ));
-
-            if config.censorship.mask_host.is_some() {
-                return Err(ProxyError::Config(
-                    "mask_unix_sock and mask_host are mutually exclusive".to_string(),
-                ));
-            }
-        }
-
-        // Default mask_host to tls_domain if not set and no unix socket configured
-        if config.censorship.mask_host.is_none() && config.censorship.mask_unix_sock.is_none() {
-            config.censorship.mask_host = Some(config.censorship.tls_domain.clone());
-        }
-
-        // Random fake_cert_len
-        use rand::Rng;
-        config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);
-
-        // Migration: Populate listeners if empty
-        if config.server.listeners.is_empty() {
-            if let Ok(ipv4) = config.server.listen_addr_ipv4.parse::<IpAddr>() {
-                config.server.listeners.push(ListenerConfig {
-                    ip: ipv4,
-                    announce_ip: None,
-                });
-            }
-            if let Some(ipv6_str) = &config.server.listen_addr_ipv6 {
-                if let Ok(ipv6) = ipv6_str.parse::<IpAddr>() {
-                    config.server.listeners.push(ListenerConfig {
-                        ip: ipv6,
-                        announce_ip: None,
-                    });
-                }
-            }
-        }
-
-        // Migration: Populate upstreams if empty (Default Direct)
-        if config.upstreams.is_empty() {
-            config.upstreams.push(UpstreamConfig {
-                upstream_type: UpstreamType::Direct { interface: None },
-                weight: 1,
-                enabled: true,
-            });
-        }
-
-        Ok(config)
-    }
-
-    pub fn validate(&self) -> Result<()> {
-        if self.access.users.is_empty() {
-            return Err(ProxyError::Config("No users configured".to_string()));
-        }
-
-        if !self.general.modes.classic && !self.general.modes.secure && !self.general.modes.tls {
-            return Err(ProxyError::Config("No modes enabled".to_string()));
-        }
-
-        if self.censorship.tls_domain.contains(' ') || self.censorship.tls_domain.contains('/') {
-            return Err(ProxyError::Config(format!(
-                "Invalid tls_domain: '{}'. Must be a valid domain name",
-                self.censorship.tls_domain
-            )));
-        }
-
-        Ok(())
-    }
-}
514 src/config/types.rs Normal file
@@ -0,0 +1,514 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::net::IpAddr;

use super::defaults::*;

// ============= Log Level =============

/// Logging verbosity level.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum LogLevel {
    /// All messages including trace (trace + debug + info + warn + error).
    Debug,
    /// Detailed operational logs (debug + info + warn + error).
    Verbose,
    /// Standard operational logs (info + warn + error).
    #[default]
    Normal,
    /// Minimal output: only warnings and errors (warn + error).
    /// Startup messages (config, DC connectivity, proxy links) are always shown
    /// via info! before the filter is applied.
    Silent,
}

impl LogLevel {
    /// Convert to tracing EnvFilter directive string.
    pub fn to_filter_str(&self) -> &'static str {
        match self {
            LogLevel::Debug => "trace",
            LogLevel::Verbose => "debug",
            LogLevel::Normal => "info",
            LogLevel::Silent => "warn",
        }
    }

    /// Parse from a loose string (CLI argument).
    pub fn from_str_loose(s: &str) -> Self {
        match s.to_lowercase().as_str() {
            "debug" | "trace" => LogLevel::Debug,
            "verbose" => LogLevel::Verbose,
            "normal" | "info" => LogLevel::Normal,
            "silent" | "quiet" | "error" | "warn" => LogLevel::Silent,
            _ => LogLevel::Normal,
        }
    }
}

impl std::fmt::Display for LogLevel {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            LogLevel::Debug => write!(f, "debug"),
            LogLevel::Verbose => write!(f, "verbose"),
            LogLevel::Normal => write!(f, "normal"),
            LogLevel::Silent => write!(f, "silent"),
        }
    }
}

// ============= Sub-Configs =============

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProxyModes {
    #[serde(default)]
    pub classic: bool,
    #[serde(default)]
    pub secure: bool,
    #[serde(default = "default_true")]
    pub tls: bool,
}

impl Default for ProxyModes {
    fn default() -> Self {
        Self {
            classic: true,
            secure: true,
            tls: true,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetworkConfig {
    #[serde(default = "default_true")]
    pub ipv4: bool,

    /// None = auto-detect IPv6 availability.
    #[serde(default)]
    pub ipv6: Option<bool>,

    /// 4 or 6.
    #[serde(default = "default_prefer_4")]
    pub prefer: u8,

    #[serde(default)]
    pub multipath: bool,
}

impl Default for NetworkConfig {
    fn default() -> Self {
        Self {
            ipv4: true,
            ipv6: None,
            prefer: 4,
            multipath: false,
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneralConfig {
    #[serde(default)]
    pub modes: ProxyModes,

    #[serde(default)]
    pub prefer_ipv6: bool,

    #[serde(default = "default_true")]
    pub fast_mode: bool,

    #[serde(default)]
    pub use_middle_proxy: bool,

    #[serde(default)]
    pub ad_tag: Option<String>,

    /// Path to proxy-secret binary file (auto-downloaded if absent).
    /// Infrastructure secret from https://core.telegram.org/getProxySecret.
    #[serde(default)]
    pub proxy_secret_path: Option<String>,

    /// Public IP override for middle-proxy NAT environments.
    /// When set, this IP is used in ME key derivation and RPC_PROXY_REQ "our_addr".
    #[serde(default)]
    pub middle_proxy_nat_ip: Option<IpAddr>,

    /// Enable STUN-based NAT probing to discover public IP:port for ME KDF.
    #[serde(default)]
    pub middle_proxy_nat_probe: bool,

    /// Optional STUN server address (host:port) for NAT probing.
    #[serde(default)]
    pub middle_proxy_nat_stun: Option<String>,

    /// Ignore STUN/interface IP mismatch (keep using Middle Proxy even if NAT detected).
    #[serde(default)]
    pub stun_iface_mismatch_ignore: bool,

    /// Log unknown (non-standard) DC requests to a file (default: unknown-dc.txt). Set to null to disable.
    #[serde(default = "default_unknown_dc_log_path")]
    pub unknown_dc_log_path: Option<String>,

    #[serde(default)]
    pub log_level: LogLevel,

    /// Disable colored output in logs (useful for files/systemd).
    #[serde(default)]
    pub disable_colors: bool,

    /// [general.links] — proxy link generation overrides.
    #[serde(default)]
    pub links: LinksConfig,
}

impl Default for GeneralConfig {
    fn default() -> Self {
        Self {
            modes: ProxyModes::default(),
            prefer_ipv6: false,
            fast_mode: true,
            use_middle_proxy: false,
            ad_tag: None,
            proxy_secret_path: None,
            middle_proxy_nat_ip: None,
            middle_proxy_nat_probe: false,
            middle_proxy_nat_stun: None,
            stun_iface_mismatch_ignore: false,
            unknown_dc_log_path: default_unknown_dc_log_path(),
            log_level: LogLevel::Normal,
            disable_colors: false,
            links: LinksConfig::default(),
        }
    }
}

/// `[general.links]` — proxy link generation settings.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct LinksConfig {
    /// List of usernames whose tg:// links to display at startup.
    /// `"*"` = all users, `["alice", "bob"]` = specific users.
    #[serde(default)]
    pub show: ShowLink,

    /// Public hostname/IP for tg:// link generation (overrides detected IP).
    #[serde(default)]
    pub public_host: Option<String>,

    /// Public port for tg:// link generation (overrides server.port).
    #[serde(default)]
    pub public_port: Option<u16>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerConfig {
    #[serde(default = "default_port")]
    pub port: u16,

    #[serde(default)]
    pub listen_addr_ipv4: Option<String>,

    #[serde(default)]
    pub listen_addr_ipv6: Option<String>,

    #[serde(default)]
    pub listen_unix_sock: Option<String>,

    /// Unix socket file permissions (octal, e.g. "0666" or "0777").
    /// Applied via chmod after bind. Default: no change (inherits umask).
    #[serde(default)]
    pub listen_unix_sock_perm: Option<String>,

    /// Enable TCP listening. Default: true when no unix socket, false when
    /// listen_unix_sock is set. Set explicitly to override auto-detection.
    #[serde(default)]
    pub listen_tcp: Option<bool>,

    #[serde(default)]
    pub metrics_port: Option<u16>,

    #[serde(default = "default_metrics_whitelist")]
    pub metrics_whitelist: Vec<IpAddr>,

    #[serde(default)]
    pub listeners: Vec<ListenerConfig>,
}

impl Default for ServerConfig {
    fn default() -> Self {
        Self {
            port: default_port(),
            listen_addr_ipv4: Some(default_listen_addr()),
            listen_addr_ipv6: Some("::".to_string()),
            listen_unix_sock: None,
            listen_unix_sock_perm: None,
            listen_tcp: None,
            metrics_port: None,
            metrics_whitelist: default_metrics_whitelist(),
            listeners: Vec::new(),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TimeoutsConfig {
    #[serde(default = "default_handshake_timeout")]
    pub client_handshake: u64,

    #[serde(default = "default_connect_timeout")]
    pub tg_connect: u64,

    #[serde(default = "default_keepalive")]
    pub client_keepalive: u64,

    #[serde(default = "default_ack_timeout")]
    pub client_ack: u64,

    /// Number of quick ME reconnect attempts for single-address DC.
    #[serde(default = "default_me_one_retry")]
    pub me_one_retry: u8,

    /// Timeout per quick attempt in milliseconds for single-address DC.
    #[serde(default = "default_me_one_timeout")]
    pub me_one_timeout_ms: u64,
}

impl Default for TimeoutsConfig {
    fn default() -> Self {
        Self {
            client_handshake: default_handshake_timeout(),
            tg_connect: default_connect_timeout(),
            client_keepalive: default_keepalive(),
            client_ack: default_ack_timeout(),
            me_one_retry: default_me_one_retry(),
            me_one_timeout_ms: default_me_one_timeout(),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AntiCensorshipConfig {
    #[serde(default = "default_tls_domain")]
    pub tls_domain: String,

    #[serde(default = "default_true")]
    pub mask: bool,

    #[serde(default)]
    pub mask_host: Option<String>,

    #[serde(default = "default_mask_port")]
    pub mask_port: u16,

    #[serde(default)]
    pub mask_unix_sock: Option<String>,

    #[serde(default = "default_fake_cert_len")]
    pub fake_cert_len: usize,
}

impl Default for AntiCensorshipConfig {
    fn default() -> Self {
        Self {
            tls_domain: default_tls_domain(),
            mask: true,
            mask_host: None,
            mask_port: default_mask_port(),
            mask_unix_sock: None,
            fake_cert_len: default_fake_cert_len(),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AccessConfig {
    #[serde(default)]
    pub users: HashMap<String, String>,

    #[serde(default)]
    pub user_max_tcp_conns: HashMap<String, usize>,

    #[serde(default)]
    pub user_expirations: HashMap<String, DateTime<Utc>>,

    #[serde(default)]
    pub user_data_quota: HashMap<String, u64>,

    #[serde(default)]
    pub user_max_unique_ips: HashMap<String, usize>,

    #[serde(default = "default_replay_check_len")]
    pub replay_check_len: usize,

    #[serde(default = "default_replay_window_secs")]
    pub replay_window_secs: u64,

    #[serde(default)]
    pub ignore_time_skew: bool,
}

impl Default for AccessConfig {
    fn default() -> Self {
        let mut users = HashMap::new();
        users.insert(
            "default".to_string(),
            "00000000000000000000000000000000".to_string(),
        );
        Self {
            users,
            user_max_tcp_conns: HashMap::new(),
            user_expirations: HashMap::new(),
            user_data_quota: HashMap::new(),
            user_max_unique_ips: HashMap::new(),
            replay_check_len: default_replay_check_len(),
            replay_window_secs: default_replay_window_secs(),
            ignore_time_skew: false,
        }
    }
}

// ============= Aux Structures =============

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum UpstreamType {
    Direct {
        #[serde(default)]
        interface: Option<String>,
    },
    Socks4 {
        address: String,
        #[serde(default)]
        interface: Option<String>,
        #[serde(default)]
        user_id: Option<String>,
    },
    Socks5 {
        address: String,
        #[serde(default)]
        interface: Option<String>,
        #[serde(default)]
        username: Option<String>,
        #[serde(default)]
        password: Option<String>,
    },
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UpstreamConfig {
    #[serde(flatten)]
    pub upstream_type: UpstreamType,
    #[serde(default = "default_weight")]
    pub weight: u16,
    #[serde(default = "default_true")]
    pub enabled: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ListenerConfig {
    pub ip: IpAddr,
    /// IP address or hostname to announce in proxy links.
    /// Takes precedence over `announce_ip` if both are set.
    #[serde(default)]
    pub announce: Option<String>,
    /// Deprecated: Use `announce` instead. IP address to announce in proxy links.
    /// Migrated to `announce` automatically if `announce` is not set.
    #[serde(default)]
    pub announce_ip: Option<IpAddr>,
}

// ============= ShowLink =============

/// Controls which users' proxy links are displayed at startup.
///
/// In TOML, this can be:
/// - `show_link = "*"` — show links for all users
/// - `show_link = ["a", "b"]` — show links for specific users
/// - omitted — show no links (default)
#[derive(Debug, Clone)]
pub enum ShowLink {
    /// Don't show any links (default when omitted).
    None,
    /// Show links for all configured users.
    All,
    /// Show links for specific users.
    Specific(Vec<String>),
}

impl Default for ShowLink {
    fn default() -> Self {
        ShowLink::None
    }
}

impl ShowLink {
    /// Returns true if no links should be shown.
    pub fn is_empty(&self) -> bool {
        matches!(self, ShowLink::None) || matches!(self, ShowLink::Specific(v) if v.is_empty())
    }

    /// Resolve the list of user names to display, given all configured users.
    pub fn resolve_users<'a>(&'a self, all_users: &'a HashMap<String, String>) -> Vec<&'a String> {
        match self {
            ShowLink::None => vec![],
            ShowLink::All => {
                let mut names: Vec<&String> = all_users.keys().collect();
                names.sort();
                names
            }
            ShowLink::Specific(names) => names.iter().collect(),
        }
    }
}

impl Serialize for ShowLink {
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
        match self {
            ShowLink::None => Vec::<String>::new().serialize(serializer),
            ShowLink::All => serializer.serialize_str("*"),
            ShowLink::Specific(v) => v.serialize(serializer),
        }
    }
}

impl<'de> Deserialize<'de> for ShowLink {
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
        use serde::de;

        struct ShowLinkVisitor;

        impl<'de> de::Visitor<'de> for ShowLinkVisitor {
            type Value = ShowLink;

            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                formatter.write_str(r#""*" or an array of user names"#)
            }

            fn visit_str<E: de::Error>(self, v: &str) -> std::result::Result<ShowLink, E> {
                if v == "*" {
                    Ok(ShowLink::All)
                } else {
                    Err(de::Error::invalid_value(
                        de::Unexpected::Str(v),
                        &r#""*""#,
                    ))
                }
            }

            fn visit_seq<A: de::SeqAccess<'de>>(self, mut seq: A) -> std::result::Result<ShowLink, A::Error> {
                let mut names = Vec::new();
                while let Some(name) = seq.next_element::<String>()? {
                    names.push(name);
                }
                if names.is_empty() {
                    Ok(ShowLink::None)
                } else {
                    Ok(ShowLink::Specific(names))
                }
            }
        }

        deserializer.deserialize_any(ShowLinkVisitor)
    }
}
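ShowLink above accepts either the string "*" or an array of user names. A small hedged sketch of the accepted TOML shapes follows, assuming ShowLink and the toml crate are in scope (both appear in this diff); the Wrapper struct is illustrative only.

// Illustrative parsing sketch for the three ShowLink forms.
#[derive(serde::Deserialize)]
struct Wrapper {
    #[serde(default)]
    show_link: ShowLink,
}

fn main() {
    let all: Wrapper = toml::from_str(r#"show_link = "*""#).unwrap();
    assert!(matches!(all.show_link, ShowLink::All));

    let some: Wrapper = toml::from_str(r#"show_link = ["alice", "bob"]"#).unwrap();
    assert!(matches!(some.show_link, ShowLink::Specific(v) if v.len() == 2));

    // Omitted key falls back to ShowLink::None via Default.
    let none: Wrapper = toml::from_str("").unwrap();
    assert!(none.show_link.is_empty());
}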
@@ -172,7 +172,7 @@ mod tests {
         let digest = sha256(&prekey);
         assert_eq!(
             hex::encode(digest),
-            "a4595b75f1f610f2575ace802ddc65c91b5acef3b0e0d18189e0c7c9f787d15c"
+            "934f5facdafd65a44d5c2df90d2f35ddc81faaaeb337949dfeef817c8a7c1e00"
         );
     }
 }
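The expected digest in the hunk above changes, presumably because the prekey bytes are built differently elsewhere in this PR. For reference, a hedged sketch of reproducing such a hex digest with the sha2 and hex crates; the input bytes are placeholders, not the real prekey, and the helper name is illustrative.

use sha2::{Digest, Sha256};

// Hash arbitrary prekey bytes and render them as lowercase hex,
// matching the shape of the assertion in the test above.
fn sha256_hex(prekey: &[u8]) -> String {
    hex::encode(Sha256::digest(prekey))
}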
462 src/ip_tracker.rs Normal file
@@ -0,0 +1,462 @@
// src/ip_tracker.rs
// Module for tracking and limiting the unique IP addresses used by each user

use std::collections::{HashMap, HashSet};
use std::net::IpAddr;
use std::sync::Arc;
use tokio::sync::RwLock;

/// Tracker of unique IP addresses for each MTProxy user
///
/// Provides a thread-safe mechanism for:
/// - Tracking each user's active IP addresses
/// - Limiting the number of unique IPs per user
/// - Automatic cleanup when clients disconnect
#[derive(Debug, Clone)]
pub struct UserIpTracker {
    /// Mapping: user name -> set of active IP addresses
    active_ips: Arc<RwLock<HashMap<String, HashSet<IpAddr>>>>,

    /// Mapping: user name -> maximum allowed number of unique IPs
    max_ips: Arc<RwLock<HashMap<String, usize>>>,
}

impl UserIpTracker {
    /// Create a new, empty tracker
    pub fn new() -> Self {
        Self {
            active_ips: Arc::new(RwLock::new(HashMap::new())),
            max_ips: Arc::new(RwLock::new(HashMap::new())),
        }
    }

    /// Set the unique-IP limit for a specific user
    ///
    /// # Arguments
    /// * `username` - User name
    /// * `max_ips` - Maximum number of simultaneously active IP addresses
    pub async fn set_user_limit(&self, username: &str, max_ips: usize) {
        let mut limits = self.max_ips.write().await;
        limits.insert(username.to_string(), max_ips);
    }

    /// Load limits from the configuration
    ///
    /// # Arguments
    /// * `limits` - HashMap with limits from config.toml
    pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
        let mut max_ips = self.max_ips.write().await;
        for (user, limit) in limits {
            max_ips.insert(user.clone(), *limit);
        }
    }

    /// Check whether the user may connect from the given IP address,
    /// and add the IP to the active set if the check succeeds
    ///
    /// # Arguments
    /// * `username` - User name
    /// * `ip` - Client IP address
    ///
    /// # Returns
    /// * `Ok(())` - Connection allowed, IP added to the active set
    /// * `Err(String)` - Connection rejected, with the reason
    pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
        // Look up the user's limit
        let max_ips = self.max_ips.read().await;
        let limit = match max_ips.get(username) {
            Some(limit) => *limit,
            None => {
                // No limit configured - allow unlimited access
                drop(max_ips);
                let mut active_ips = self.active_ips.write().await;
                let user_ips = active_ips
                    .entry(username.to_string())
                    .or_insert_with(HashSet::new);
                user_ips.insert(ip);
                return Ok(());
            }
        };
        drop(max_ips);

        // Check and update the active IPs
        let mut active_ips = self.active_ips.write().await;
        let user_ips = active_ips
            .entry(username.to_string())
            .or_insert_with(HashSet::new);

        // If the IP is already in the set this is a repeat connection - allow it
        if user_ips.contains(&ip) {
            return Ok(());
        }

        // Check whether the limit has been reached
        if user_ips.len() >= limit {
            return Err(format!(
                "IP limit reached for user '{}': {}/{} unique IPs already connected",
                username,
                user_ips.len(),
                limit
            ));
        }

        // Limit not exceeded - add the new IP
        user_ips.insert(ip);
        Ok(())
    }

    /// Remove an IP address from the active set when a client disconnects
    ///
    /// # Arguments
    /// * `username` - User name
    /// * `ip` - IP address of the disconnected client
    pub async fn remove_ip(&self, username: &str, ip: IpAddr) {
        let mut active_ips = self.active_ips.write().await;

        if let Some(user_ips) = active_ips.get_mut(username) {
            user_ips.remove(&ip);

            // If the user has no active IPs left, drop the entry
            // to save memory
            if user_ips.is_empty() {
                active_ips.remove(username);
            }
        }
    }

    /// Get the current number of active IP addresses for a user
    ///
    /// # Arguments
    /// * `username` - User name
    ///
    /// # Returns
    /// Number of unique active IP addresses
    pub async fn get_active_ip_count(&self, username: &str) -> usize {
        let active_ips = self.active_ips.read().await;
        active_ips
            .get(username)
            .map(|ips| ips.len())
            .unwrap_or(0)
    }

    /// Get the list of all active IP addresses for a user
    ///
    /// # Arguments
    /// * `username` - User name
    ///
    /// # Returns
    /// Vector of active IP addresses
    pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
        let active_ips = self.active_ips.read().await;
        active_ips
            .get(username)
            .map(|ips| ips.iter().copied().collect())
            .unwrap_or_else(Vec::new)
    }

    /// Get statistics for all users
    ///
    /// # Returns
    /// Vector of tuples: (user_name, active_ip_count, limit)
    pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
        let active_ips = self.active_ips.read().await;
        let max_ips = self.max_ips.read().await;

        let mut stats = Vec::new();

        // Collect statistics for users with active connections
        for (username, user_ips) in active_ips.iter() {
            let limit = max_ips.get(username).copied().unwrap_or(0);
            stats.push((username.clone(), user_ips.len(), limit));
        }

        stats.sort_by(|a, b| a.0.cmp(&b.0)); // Sort by user name
        stats
    }

    /// Clear all active IPs for a user (when needed)
    ///
    /// # Arguments
    /// * `username` - User name
    pub async fn clear_user_ips(&self, username: &str) {
        let mut active_ips = self.active_ips.write().await;
        active_ips.remove(username);
    }

    /// Clear all statistics (use with caution!)
    pub async fn clear_all(&self) {
        let mut active_ips = self.active_ips.write().await;
        active_ips.clear();
    }

    /// Check whether the user is connected from the given IP
    ///
    /// # Arguments
    /// * `username` - User name
    /// * `ip` - IP address to check
    ///
    /// # Returns
    /// `true` if the IP is active, `false` otherwise
    pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
        let active_ips = self.active_ips.read().await;
        active_ips
            .get(username)
            .map(|ips| ips.contains(&ip))
            .unwrap_or(false)
    }

    /// Get the limit for a user
    ///
    /// # Arguments
    /// * `username` - User name
    ///
    /// # Returns
    /// IP address limit, or None if no limit is set
    pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
        let max_ips = self.max_ips.read().await;
        max_ips.get(username).copied()
    }

    /// Format the statistics as readable text
    ///
    /// # Returns
    /// A string with statistics for logs or monitoring
    pub async fn format_stats(&self) -> String {
        let stats = self.get_stats().await;

        if stats.is_empty() {
            return String::from("No active users");
        }

        let mut output = String::from("User IP Statistics:\n");
        output.push_str("==================\n");

        for (username, active_count, limit) in stats {
            output.push_str(&format!(
                "User: {:<20} Active IPs: {}/{}\n",
                username,
                active_count,
                if limit > 0 { limit.to_string() } else { "unlimited".to_string() }
            ));

            let ips = self.get_active_ips(&username).await;
            for ip in ips {
                output.push_str(&format!("  └─ {}\n", ip));
            }
        }

        output
    }
}

impl Default for UserIpTracker {
    fn default() -> Self {
        Self::new()
    }
}

// ============================================================================
// TESTS
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

    fn test_ipv4(oct1: u8, oct2: u8, oct3: u8, oct4: u8) -> IpAddr {
        IpAddr::V4(Ipv4Addr::new(oct1, oct2, oct3, oct4))
    }

    fn test_ipv6() -> IpAddr {
        IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1))
    }

    #[tokio::test]
    async fn test_basic_ip_limit() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 2).await;

        let ip1 = test_ipv4(192, 168, 1, 1);
        let ip2 = test_ipv4(192, 168, 1, 2);
        let ip3 = test_ipv4(192, 168, 1, 3);

        // The first two IPs should be accepted
        assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||||
|
|
||||||
|
// Третий IP должен быть отклонен
|
||||||
|
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||||
|
|
||||||
|
// Проверяем счетчик
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_reconnection_from_same_ip() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 2).await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
|
||||||
|
// Первое подключение
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
|
||||||
|
// Повторное подключение с того же IP должно пройти
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
|
||||||
|
// Счетчик не должен увеличиться
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_ip_removal() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 2).await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
|
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||||
|
|
||||||
|
// Добавляем два IP
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||||
|
|
||||||
|
// Третий не должен пройти
|
||||||
|
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||||
|
|
||||||
|
// Удаляем первый IP
|
||||||
|
tracker.remove_ip("test_user", ip1).await;
|
||||||
|
|
||||||
|
// Теперь третий должен пройти
|
||||||
|
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
||||||
|
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_no_limit() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
// Не устанавливаем лимит для test_user
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
|
let ip3 = test_ipv4(192, 168, 1, 3);
|
||||||
|
|
||||||
|
// Без лимита все IP должны проходить
|
||||||
|
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
|
||||||
|
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 3);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_multiple_users() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("user1", 2).await;
|
||||||
|
tracker.set_user_limit("user2", 1).await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
|
|
||||||
|
// user1 может использовать 2 IP
|
||||||
|
assert!(tracker.check_and_add("user1", ip1).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("user1", ip2).await.is_ok());
|
||||||
|
|
||||||
|
// user2 может использовать только 1 IP
|
||||||
|
assert!(tracker.check_and_add("user2", ip1).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("user2", ip2).await.is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_ipv6_support() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 2).await;
|
||||||
|
|
||||||
|
let ipv4 = test_ipv4(192, 168, 1, 1);
|
||||||
|
let ipv6 = test_ipv6();
|
||||||
|
|
||||||
|
// Должны работать оба типа адресов
|
||||||
|
assert!(tracker.check_and_add("test_user", ipv4).await.is_ok());
|
||||||
|
assert!(tracker.check_and_add("test_user", ipv6).await.is_ok());
|
||||||
|
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_get_active_ips() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("test_user", 3).await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
|
|
||||||
|
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||||
|
tracker.check_and_add("test_user", ip2).await.unwrap();
|
||||||
|
|
||||||
|
let active_ips = tracker.get_active_ips("test_user").await;
|
||||||
|
assert_eq!(active_ips.len(), 2);
|
||||||
|
assert!(active_ips.contains(&ip1));
|
||||||
|
assert!(active_ips.contains(&ip2));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_stats() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
tracker.set_user_limit("user1", 3).await;
|
||||||
|
tracker.set_user_limit("user2", 2).await;
|
||||||
|
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
|
|
||||||
|
tracker.check_and_add("user1", ip1).await.unwrap();
|
||||||
|
tracker.check_and_add("user2", ip2).await.unwrap();
|
||||||
|
|
||||||
|
let stats = tracker.get_stats().await;
|
||||||
|
assert_eq!(stats.len(), 2);
|
||||||
|
|
||||||
|
// Проверяем наличие обоих пользователей в статистике
|
||||||
|
assert!(stats.iter().any(|(name, _, _)| name == "user1"));
|
||||||
|
assert!(stats.iter().any(|(name, _, _)| name == "user2"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_clear_user_ips() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
|
||||||
|
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
|
||||||
|
|
||||||
|
tracker.clear_user_ips("test_user").await;
|
||||||
|
assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_is_ip_active() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
let ip1 = test_ipv4(192, 168, 1, 1);
|
||||||
|
let ip2 = test_ipv4(192, 168, 1, 2);
|
||||||
|
|
||||||
|
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||||
|
|
||||||
|
assert!(tracker.is_ip_active("test_user", ip1).await);
|
||||||
|
assert!(!tracker.is_ip_active("test_user", ip2).await);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_load_limits_from_config() {
|
||||||
|
let tracker = UserIpTracker::new();
|
||||||
|
|
||||||
|
let mut config_limits = HashMap::new();
|
||||||
|
config_limits.insert("user1".to_string(), 5);
|
||||||
|
config_limits.insert("user2".to_string(), 3);
|
||||||
|
|
||||||
|
tracker.load_limits(&config_limits).await;
|
||||||
|
|
||||||
|
assert_eq!(tracker.get_user_limit("user1").await, Some(5));
|
||||||
|
assert_eq!(tracker.get_user_limit("user2").await, Some(3));
|
||||||
|
assert_eq!(tracker.get_user_limit("user3").await, None);
|
||||||
|
}
|
||||||
|
}
|
||||||
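Not part of the diff: a minimal sketch of how a connection handler might drive this tracker (check on connect, release on disconnect); the wrapper function here is hypothetical, and the real wiring goes through ClientHandler / handle_client_stream in the src/main.rs changes below.

// Illustrative only: a hypothetical per-connection wrapper around UserIpTracker.
async fn with_ip_limit(
    tracker: std::sync::Arc<UserIpTracker>,
    username: &str,
    peer_ip: std::net::IpAddr,
) -> Result<(), String> {
    // Reject the connection if the user already has `limit` other active IPs.
    tracker.check_and_add(username, peer_ip).await?;

    // ... proxy traffic here (placeholder) ...

    // Always release the IP on disconnect so later reconnections are not blocked.
    tracker.remove_ip(username, peer_ip).await;
    Ok(())
}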
596  src/main.rs
@@ -8,11 +8,16 @@ use tokio::signal;
 use tokio::sync::Semaphore;
 use tracing::{debug, error, info, warn};
 use tracing_subscriber::{EnvFilter, fmt, prelude::*, reload};
+#[cfg(unix)]
+use tokio::net::UnixListener;

 mod cli;
 mod config;
 mod crypto;
 mod error;
+mod ip_tracker;
+mod network;
+mod metrics;
 mod protocol;
 mod proxy;
 mod stats;
@@ -22,12 +27,15 @@ mod util;

 use crate::config::{LogLevel, ProxyConfig};
 use crate::crypto::SecureRandom;
+use crate::ip_tracker::UserIpTracker;
+use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
 use crate::proxy::ClientHandler;
 use crate::stats::{ReplayChecker, Stats};
 use crate::stream::BufferPool;
-use crate::transport::middle_proxy::MePool;
+use crate::transport::middle_proxy::{
+    MePool, fetch_proxy_config, run_me_ping, MePingFamily, MePingSample, format_sample_line,
+};
 use crate::transport::{ListenOptions, UpstreamManager, create_listener};
-use crate::util::ip::detect_ip;

 fn parse_cli() -> (String, bool, Option<String>) {
     let mut config_path = "config.toml".to_string();
@@ -97,11 +105,42 @@ fn parse_cli() -> (String, bool, Option<String>) {
     (config_path, silent, log_level)
 }

+fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
+    info!("--- Proxy Links ({}) ---", host);
+    for user_name in config.general.links.show.resolve_users(&config.access.users) {
+        if let Some(secret) = config.access.users.get(user_name) {
+            info!("User: {}", user_name);
+            if config.general.modes.classic {
+                info!(
+                    " Classic: tg://proxy?server={}&port={}&secret={}",
+                    host, port, secret
+                );
+            }
+            if config.general.modes.secure {
+                info!(
+                    " DD: tg://proxy?server={}&port={}&secret=dd{}",
+                    host, port, secret
+                );
+            }
+            if config.general.modes.tls {
+                let domain_hex = hex::encode(&config.censorship.tls_domain);
+                info!(
+                    " EE-TLS: tg://proxy?server={}&port={}&secret=ee{}{}",
+                    host, port, secret, domain_hex
+                );
+            }
+        } else {
+            warn!("User '{}' in show_link not found", user_name);
+        }
+    }
+    info!("------------------------");
+}
+
 #[tokio::main]
 async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
     let (config_path, cli_silent, cli_log_level) = parse_cli();

-    let config = match ProxyConfig::load(&config_path) {
+    let mut config = match ProxyConfig::load(&config_path) {
         Ok(c) => c,
         Err(e) => {
             if std::path::Path::new(&config_path).exists() {
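For orientation only (values below are hypothetical, not taken from the diff): with host 203.0.113.10, port 443, a user secret of 00112233445566778899aabbccddeeff and tls_domain = "example.com", print_proxy_links would log links of the form

Classic: tg://proxy?server=203.0.113.10&port=443&secret=00112233445566778899aabbccddeeff
DD:      tg://proxy?server=203.0.113.10&port=443&secret=dd00112233445566778899aabbccddeeff
EE-TLS:  tg://proxy?server=203.0.113.10&port=443&secret=ee00112233445566778899aabbccddeeff6578616d706c652e636f6d

where the trailing 6578616d706c652e636f6d is hex::encode("example.com").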
@@ -131,13 +170,24 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
     };

     let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info"));

+    // Configure color output based on config
+    let fmt_layer = if config.general.disable_colors {
+        fmt::Layer::default().with_ansi(false)
+    } else {
+        fmt::Layer::default().with_ansi(true)
+    };
+
     tracing_subscriber::registry()
         .with(filter_layer)
-        .with(fmt::Layer::default())
+        .with(fmt_layer)
         .init();

     info!("Telemt MTProxy v{}", env!("CARGO_PKG_VERSION"));
     info!("Log level: {}", effective_log_level);
+    if config.general.disable_colors {
+        info!("Colors: disabled");
+    }
     info!(
         "Modes: classic={} secure={} tls={}",
         config.general.modes.classic, config.general.modes.secure, config.general.modes.tls
@@ -168,23 +218,36 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
         warn!("Using default tls_domain. Consider setting a custom domain.");
     }

-    let prefer_ipv6 = config.general.prefer_ipv6;
-    let use_middle_proxy = config.general.use_middle_proxy;
-    let config = Arc::new(config);
+    let probe = run_probe(
+        &config.network,
+        config.general.middle_proxy_nat_stun.clone(),
+        config.general.middle_proxy_nat_probe,
+    )
+    .await?;
+    let decision = decide_network_capabilities(&config.network, &probe);
+    log_probe_result(&probe, &decision);
+
+    let prefer_ipv6 = decision.prefer_ipv6();
+    let mut use_middle_proxy = config.general.use_middle_proxy && (decision.ipv4_me || decision.ipv6_me);
     let stats = Arc::new(Stats::new());
     let rng = Arc::new(SecureRandom::new());

-    let replay_checker = Arc::new(ReplayChecker::new(
-        config.access.replay_check_len,
-        Duration::from_secs(config.access.replay_window_secs),
-    ));
-    let upstream_manager = Arc::new(UpstreamManager::new(config.upstreams.clone()));
-    let buffer_pool = Arc::new(BufferPool::with_config(16 * 1024, 4096));
+    // IP Tracker initialization
+    let ip_tracker = Arc::new(UserIpTracker::new());
+    ip_tracker.load_limits(&config.access.user_max_unique_ips).await;
+    if !config.access.user_max_unique_ips.is_empty() {
+        info!("IP limits configured for {} users", config.access.user_max_unique_ips.len());
+    }

     // Connection concurrency limit
     let _max_connections = Arc::new(Semaphore::new(10_000));

+    if use_middle_proxy && !decision.ipv4_me && !decision.ipv6_me {
+        warn!("No usable IP family for Middle Proxy detected; falling back to direct DC");
+        use_middle_proxy = false;
+    }
+
     // =====================================================================
     // Middle Proxy initialization (if enabled)
     // =====================================================================
@@ -209,25 +272,44 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
         // proxy-secret is from: https://core.telegram.org/getProxySecret
         // =============================================================
         let proxy_secret_path = config.general.proxy_secret_path.as_deref();
         match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).await {
             Ok(proxy_secret) => {
                 info!(
-                    secret_len = proxy_secret.len(),
+                    secret_len = proxy_secret.len() as usize, // explicit usize type
                     key_sig = format_args!(
                         "0x{:08x}",
                         if proxy_secret.len() >= 4 {
                             u32::from_le_bytes([
                                 proxy_secret[0],
                                 proxy_secret[1],
                                 proxy_secret[2],
                                 proxy_secret[3],
                             ])
                         } else {
                             0
                         }
                     ),
                     "Proxy-secret loaded"
                 );

+                // Load ME config (v4/v6) + default DC
+                let mut cfg_v4 = fetch_proxy_config(
+                    "https://core.telegram.org/getProxyConfig",
+                )
+                .await
+                .unwrap_or_default();
+                let mut cfg_v6 = fetch_proxy_config(
+                    "https://core.telegram.org/getProxyConfigV6",
+                )
+                .await
+                .unwrap_or_default();
+
+                if cfg_v4.map.is_empty() {
+                    cfg_v4.map = crate::protocol::constants::TG_MIDDLE_PROXIES_V4.clone();
+                }
+                if cfg_v6.map.is_empty() {
+                    cfg_v6.map = crate::protocol::constants::TG_MIDDLE_PROXIES_V6.clone();
+                }
+
                 let pool = MePool::new(
                     proxy_tag,
@@ -235,6 +317,14 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
                     config.general.middle_proxy_nat_ip,
                     config.general.middle_proxy_nat_probe,
                     config.general.middle_proxy_nat_stun.clone(),
+                    probe.detected_ipv6,
+                    config.timeouts.me_one_retry,
+                    config.timeouts.me_one_timeout_ms,
+                    cfg_v4.map.clone(),
+                    cfg_v6.map.clone(),
+                    cfg_v4.default_dc.or(cfg_v6.default_dc),
+                    decision.clone(),
+                    rng.clone(),
                 );

                 match pool.init(2, &rng).await {
@@ -251,6 +341,30 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
                             .await;
                         });

+                        // Periodic ME connection rotation
+                        let pool_clone_rot = pool.clone();
+                        let rng_clone_rot = rng.clone();
+                        tokio::spawn(async move {
+                            crate::transport::middle_proxy::me_rotation_task(
+                                pool_clone_rot,
+                                rng_clone_rot,
+                                std::time::Duration::from_secs(1800),
+                            )
+                            .await;
+                        });
+
+                        // Periodic updater: getProxyConfig + proxy-secret
+                        let pool_clone2 = pool.clone();
+                        let rng_clone2 = rng.clone();
+                        tokio::spawn(async move {
+                            crate::transport::middle_proxy::me_config_updater(
+                                pool_clone2,
+                                rng_clone2,
+                                std::time::Duration::from_secs(12 * 3600),
+                            )
+                            .await;
+                        });
+
                         Some(pool)
                     }
                     Err(e) => {
@@ -268,95 +382,186 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
             None
     };

+    // If ME failed to initialize, force direct-only mode.
     if me_pool.is_some() {
-        info!("Transport: Middle Proxy (supports all DCs including CDN)");
+        info!("Transport: Middle-End Proxy - all DC-over-RPC");
     } else {
-        info!("Transport: Direct TCP (standard DCs only)");
+        use_middle_proxy = false;
+        // Make runtime config reflect direct-only mode for handlers.
+        config.general.use_middle_proxy = false;
+        info!("Transport: Direct DC - TCP - standard DC-over-TCP");
     }

-    // Startup DC ping (only meaningful in direct mode)
-    if me_pool.is_none() {
-        info!("================= Telegram DC Connectivity =================");
+    // Freeze config after possible fallback decision
+    let config = Arc::new(config);

-        let ping_results = upstream_manager.ping_all_dcs(prefer_ipv6).await;
+    let replay_checker = Arc::new(ReplayChecker::new(
+        config.access.replay_check_len,
+        Duration::from_secs(config.access.replay_window_secs),
+    ));

-        for upstream_result in &ping_results {
-            // Show which IP version is in use and which is fallback
-            if upstream_result.both_available {
-                if prefer_ipv6 {
-                    info!(" IPv6 in use and IPv4 is fallback");
-                } else {
-                    info!(" IPv4 in use and IPv6 is fallback");
-                }
-            } else {
-                let v6_works = upstream_result
-                    .v6_results
-                    .iter()
-                    .any(|r| r.rtt_ms.is_some());
-                let v4_works = upstream_result
-                    .v4_results
-                    .iter()
-                    .any(|r| r.rtt_ms.is_some());
-                if v6_works && !v4_works {
-                    info!(" IPv6 only (IPv4 unavailable)");
-                } else if v4_works && !v6_works {
-                    info!(" IPv4 only (IPv6 unavailable)");
-                } else if !v6_works && !v4_works {
-                    info!(" No connectivity!");
-                }
-            }
+    let upstream_manager = Arc::new(UpstreamManager::new(config.upstreams.clone()));
+    let buffer_pool = Arc::new(BufferPool::with_config(16 * 1024, 4096));

-            info!(" via {}", upstream_result.upstream_name);
-            info!("============================================================");
+    // Middle-End ping before DC connectivity
+    if let Some(ref pool) = me_pool {
+        let me_results = run_me_ping(pool, &rng).await;

-            // Print IPv6 results first
-            for dc in &upstream_result.v6_results {
-                let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
-                match &dc.rtt_ms {
-                    Some(rtt) => {
-                        // Align: IPv6 addresses are longer, use fewer tabs
-                        // [2001:b28:f23d:f001::a]:443 = ~28 chars
-                        info!(" DC{} [IPv6] {}:\t\t{:.0} ms", dc.dc_idx, addr_str, rtt);
-                    }
-                    None => {
-                        let err = dc.error.as_deref().unwrap_or("fail");
-                        info!(" DC{} [IPv6] {}:\t\tFAIL ({})", dc.dc_idx, addr_str, err);
-                    }
-                }
-            }
+        let v4_ok = me_results.iter().any(|r| {
+            matches!(r.family, MePingFamily::V4)
+                && r.samples.iter().any(|s| s.error.is_none() && s.handshake_ms.is_some())
+        });
+        let v6_ok = me_results.iter().any(|r| {
+            matches!(r.family, MePingFamily::V6)
+                && r.samples.iter().any(|s| s.error.is_none() && s.handshake_ms.is_some())
+        });

-            info!("============================================================");
-            // Print IPv4 results
-            for dc in &upstream_result.v4_results {
-                let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
-                match &dc.rtt_ms {
-                    Some(rtt) => {
-                        // Align: IPv4 addresses are shorter, use more tabs
-                        // 149.154.175.50:443 = ~18 chars
-                        info!(
-                            " DC{} [IPv4] {}:\t\t\t\t{:.0} ms",
-                            dc.dc_idx, addr_str, rtt
-                        );
-                    }
-                    None => {
-                        let err = dc.error.as_deref().unwrap_or("fail");
-                        info!(
-                            " DC{} [IPv4] {}:\t\t\t\tFAIL ({})",
-                            dc.dc_idx, addr_str, err
-                        );
-                    }
-                }
-            }
+        info!("================= Telegram ME Connectivity =================");
+        if v4_ok && v6_ok {
+            info!(" IPv4 and IPv6 available");
+        } else if v4_ok {
+            info!(" IPv4 only / IPv6 unavailable");
+        } else if v6_ok {
+            info!(" IPv6 only / IPv4 unavailable");
+        } else {
+            info!(" No ME connectivity");

-            info!("============================================================");
         }
+        info!(" via direct");
+        info!("============================================================");
+
+        use std::collections::BTreeMap;
+        let mut grouped: BTreeMap<i32, Vec<MePingSample>> = BTreeMap::new();
+        for report in me_results {
+            for s in report.samples {
+                let key = s.dc.abs();
+                grouped.entry(key).or_default().push(s);
+            }
+        }
+
+        let family_order = if prefer_ipv6 {
+            vec![(MePingFamily::V6, true), (MePingFamily::V6, false), (MePingFamily::V4, true), (MePingFamily::V4, false)]
+        } else {
+            vec![(MePingFamily::V4, true), (MePingFamily::V4, false), (MePingFamily::V6, true), (MePingFamily::V6, false)]
+        };
+
+        for (dc_abs, samples) in grouped {
+            for (family, is_pos) in &family_order {
+                let fam_samples: Vec<&MePingSample> = samples
+                    .iter()
+                    .filter(|s| matches!(s.family, f if &f == family) && (s.dc >= 0) == *is_pos)
+                    .collect();
+                if fam_samples.is_empty() {
+                    continue;
+                }
+
+                let fam_label = match family {
+                    MePingFamily::V4 => "IPv4",
+                    MePingFamily::V6 => "IPv6",
+                };
+                info!(" DC{} [{}]", dc_abs, fam_label);
+                for sample in fam_samples {
+                    let line = format_sample_line(sample);
+                    info!("{}", line);
+                }
+            }
+        }
+        info!("============================================================");
     }

+    info!("================= Telegram DC Connectivity =================");
+
+    let ping_results = upstream_manager
+        .ping_all_dcs(
+            prefer_ipv6,
+            &config.dc_overrides,
+            decision.ipv4_dc,
+            decision.ipv6_dc,
+        )
+        .await;
+
+    for upstream_result in &ping_results {
+        let v6_works = upstream_result
+            .v6_results
+            .iter()
+            .any(|r| r.rtt_ms.is_some());
+        let v4_works = upstream_result
+            .v4_results
+            .iter()
+            .any(|r| r.rtt_ms.is_some());
+
+        if upstream_result.both_available {
+            if prefer_ipv6 {
+                info!(" IPv6 in use / IPv4 is fallback");
+            } else {
+                info!(" IPv4 in use / IPv6 is fallback");
+            }
+        } else {
+            if v6_works && !v4_works {
+                info!(" IPv6 only / IPv4 unavailable)");
+            } else if v4_works && !v6_works {
+                info!(" IPv4 only / IPv6 unavailable)");
+            } else if !v6_works && !v4_works {
+                info!(" No DC connectivity");
+            }
+        }
+
+        info!(" via {}", upstream_result.upstream_name);
+        info!("============================================================");
+
+        // Print IPv6 results first (only if IPv6 is available)
+        if v6_works {
+            for dc in &upstream_result.v6_results {
+                let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
+                match &dc.rtt_ms {
+                    Some(rtt) => {
+                        info!(" DC{} [IPv6] {} - {:.0} ms", dc.dc_idx, addr_str, rtt);
+                    }
+                    None => {
+                        let err = dc.error.as_deref().unwrap_or("fail");
+                        info!(" DC{} [IPv6] {} - FAIL ({})", dc.dc_idx, addr_str, err);
+                    }
+                }
+            }
+
+            info!("============================================================");
+        }
+
+        // Print IPv4 results (only if IPv4 is available)
+        if v4_works {
+            for dc in &upstream_result.v4_results {
+                let addr_str = format!("{}:{}", dc.dc_addr.ip(), dc.dc_addr.port());
+                match &dc.rtt_ms {
+                    Some(rtt) => {
+                        info!(
+                            " DC{} [IPv4] {}\t\t\t\t{:.0} ms",
+                            dc.dc_idx, addr_str, rtt
+                        );
+                    }
+                    None => {
+                        let err = dc.error.as_deref().unwrap_or("fail");
+                        info!(
+                            " DC{} [IPv4] {}:\t\t\t\tFAIL ({})",
+                            dc.dc_idx, addr_str, err
+                        );
+                    }
+                }
+            }
+
+            info!("============================================================");
+        }
+    }
+
     // Background tasks
     let um_clone = upstream_manager.clone();
+    let decision_clone = decision.clone();
     tokio::spawn(async move {
-        um_clone.run_health_checks(prefer_ipv6).await;
+        um_clone
+            .run_health_checks(
+                prefer_ipv6,
+                decision_clone.ipv4_dc,
+                decision_clone.ipv6_dc,
+            )
+            .await;
     });

     let rc_clone = replay_checker.clone();
@@ -364,16 +569,31 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
         rc_clone.run_periodic_cleanup().await;
     });

-    let detected_ip = detect_ip().await;
+    let detected_ip_v4: Option<std::net::IpAddr> = probe
+        .reflected_ipv4
+        .map(|s| s.ip())
+        .or_else(|| probe.detected_ipv4.map(std::net::IpAddr::V4));
+    let detected_ip_v6: Option<std::net::IpAddr> = probe
+        .reflected_ipv6
+        .map(|s| s.ip())
+        .or_else(|| probe.detected_ipv6.map(std::net::IpAddr::V6));
     debug!(
         "Detected IPs: v4={:?} v6={:?}",
-        detected_ip.ipv4, detected_ip.ipv6
+        detected_ip_v4, detected_ip_v6
     );

     let mut listeners = Vec::new();

     for listener_conf in &config.server.listeners {
         let addr = SocketAddr::new(listener_conf.ip, config.server.port);
+        if addr.is_ipv4() && !decision.ipv4_dc {
+            warn!(%addr, "Skipping IPv4 listener: IPv4 disabled by [network]");
+            continue;
+        }
+        if addr.is_ipv6() && !decision.ipv6_dc {
+            warn!(%addr, "Skipping IPv6 listener: IPv6 disabled by [network]");
+            continue;
+        }
         let options = ListenOptions {
             ipv6_only: listener_conf.ip.is_ipv6(),
             ..Default::default()
@@ -384,47 +604,28 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
         let listener = TcpListener::from_std(socket.into())?;
         info!("Listening on {}", addr);

-        let public_ip = if let Some(ip) = listener_conf.announce_ip {
-            ip
+        // Resolve the public host for link generation
+        let public_host = if let Some(ref announce) = listener_conf.announce {
+            announce.clone() // Use announce (IP or hostname) if explicitly set
         } else if listener_conf.ip.is_unspecified() {
+            // Auto-detect for unspecified addresses
             if listener_conf.ip.is_ipv4() {
-                detected_ip.ipv4.unwrap_or(listener_conf.ip)
+                detected_ip_v4
+                    .map(|ip| ip.to_string())
+                    .unwrap_or_else(|| listener_conf.ip.to_string())
             } else {
-                detected_ip.ipv6.unwrap_or(listener_conf.ip)
+                detected_ip_v6
+                    .map(|ip| ip.to_string())
+                    .unwrap_or_else(|| listener_conf.ip.to_string())
             }
         } else {
-            listener_conf.ip
+            listener_conf.ip.to_string()
         };

-        if !config.show_link.is_empty() {
-            info!("--- Proxy Links ({}) ---", public_ip);
-            for user_name in &config.show_link {
-                if let Some(secret) = config.access.users.get(user_name) {
-                    info!("User: {}", user_name);
-                    if config.general.modes.classic {
-                        info!(
-                            " Classic: tg://proxy?server={}&port={}&secret={}",
-                            public_ip, config.server.port, secret
-                        );
-                    }
-                    if config.general.modes.secure {
-                        info!(
-                            " DD: tg://proxy?server={}&port={}&secret=dd{}",
-                            public_ip, config.server.port, secret
-                        );
-                    }
-                    if config.general.modes.tls {
-                        let domain_hex = hex::encode(&config.censorship.tls_domain);
-                        info!(
-                            " EE-TLS: tg://proxy?server={}&port={}&secret=ee{}{}",
-                            public_ip, config.server.port, secret, domain_hex
-                        );
-                    }
-                } else {
-                    warn!("User '{}' in show_link not found", user_name);
-                }
-            }
-            info!("------------------------");
+        // Show per-listener proxy links only when public_host is not set
+        if config.general.links.public_host.is_none() && !config.general.links.show.is_empty() {
+            let link_port = config.general.links.public_port.unwrap_or(config.server.port);
+            print_proxy_links(&public_host, link_port, &config);
         }

         listeners.push(listener);
@@ -435,7 +636,103 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
         }
     }

-    if listeners.is_empty() {
+    // Show proxy links once when public_host is set, OR when there are no TCP listeners
+    // (unix-only mode) — use detected IP as fallback
+    if !config.general.links.show.is_empty() && (config.general.links.public_host.is_some() || listeners.is_empty()) {
+        let (host, port) = if let Some(ref h) = config.general.links.public_host {
+            (h.clone(), config.general.links.public_port.unwrap_or(config.server.port))
+        } else {
+            let ip = detected_ip_v4
+                .or(detected_ip_v6)
+                .map(|ip| ip.to_string());
+            if ip.is_none() {
+                warn!("show_link is configured but public IP could not be detected. Set public_host in config.");
+            }
+            (ip.unwrap_or_else(|| "UNKNOWN".to_string()), config.general.links.public_port.unwrap_or(config.server.port))
+        };
+
+        print_proxy_links(&host, port, &config);
+    }
+
+    // Unix socket setup (before listeners check so unix-only config works)
+    let mut has_unix_listener = false;
+    #[cfg(unix)]
+    if let Some(ref unix_path) = config.server.listen_unix_sock {
+        // Remove stale socket file if present (standard practice)
+        let _ = tokio::fs::remove_file(unix_path).await;
+
+        let unix_listener = UnixListener::bind(unix_path)?;
+
+        // Apply socket permissions if configured
+        if let Some(ref perm_str) = config.server.listen_unix_sock_perm {
+            match u32::from_str_radix(perm_str.trim_start_matches('0'), 8) {
+                Ok(mode) => {
+                    use std::os::unix::fs::PermissionsExt;
+                    let perms = std::fs::Permissions::from_mode(mode);
+                    if let Err(e) = std::fs::set_permissions(unix_path, perms) {
+                        error!("Failed to set unix socket permissions to {}: {}", perm_str, e);
+                    } else {
+                        info!("Listening on unix:{} (mode {})", unix_path, perm_str);
+                    }
+                }
+                Err(e) => {
+                    warn!("Invalid listen_unix_sock_perm '{}': {}. Ignoring.", perm_str, e);
+                    info!("Listening on unix:{}", unix_path);
+                }
+            }
+        } else {
+            info!("Listening on unix:{}", unix_path);
+        }
+
+        has_unix_listener = true;
+
+        let config = config.clone();
+        let stats = stats.clone();
+        let upstream_manager = upstream_manager.clone();
+        let replay_checker = replay_checker.clone();
+        let buffer_pool = buffer_pool.clone();
+        let rng = rng.clone();
+        let me_pool = me_pool.clone();
+        let ip_tracker = ip_tracker.clone();
+
+        tokio::spawn(async move {
+            let unix_conn_counter = std::sync::Arc::new(std::sync::atomic::AtomicU64::new(1));
+
+            loop {
+                match unix_listener.accept().await {
+                    Ok((stream, _)) => {
+                        let conn_id = unix_conn_counter.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+                        let fake_peer = SocketAddr::from(([127, 0, 0, 1], (conn_id % 65535) as u16));

+                        let config = config.clone();
+                        let stats = stats.clone();
+                        let upstream_manager = upstream_manager.clone();
+                        let replay_checker = replay_checker.clone();
+                        let buffer_pool = buffer_pool.clone();
+                        let rng = rng.clone();
+                        let me_pool = me_pool.clone();
+                        let ip_tracker = ip_tracker.clone();
+
+                        tokio::spawn(async move {
+                            if let Err(e) = crate::proxy::client::handle_client_stream(
+                                stream, fake_peer, config, stats,
+                                upstream_manager, replay_checker, buffer_pool, rng,
+                                me_pool, ip_tracker,
+                            ).await {
+                                debug!(error = %e, "Unix socket connection error");
+                            }
+                        });
+                    }
+                    Err(e) => {
+                        error!("Unix socket accept error: {}", e);
+                        tokio::time::sleep(Duration::from_millis(100)).await;
+                    }
+                }
+            }
+        });
+    }
+
+    if listeners.is_empty() && !has_unix_listener {
         error!("No listeners. Exiting.");
         std::process::exit(1);
     }
@@ -450,6 +747,14 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
         .reload(runtime_filter)
         .expect("Failed to switch log filter");

+    if let Some(port) = config.server.metrics_port {
+        let stats = stats.clone();
+        let whitelist = config.server.metrics_whitelist.clone();
+        tokio::spawn(async move {
+            metrics::serve(port, stats, whitelist).await;
+        });
+    }
+
     for listener in listeners {
         let config = config.clone();
         let stats = stats.clone();
@@ -458,6 +763,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
         let buffer_pool = buffer_pool.clone();
         let rng = rng.clone();
         let me_pool = me_pool.clone();
+        let ip_tracker = ip_tracker.clone();

         tokio::spawn(async move {
             loop {
@@ -470,6 +776,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
                 let buffer_pool = buffer_pool.clone();
                 let rng = rng.clone();
                 let me_pool = me_pool.clone();
+                let ip_tracker = ip_tracker.clone();

                 tokio::spawn(async move {
                     if let Err(e) = ClientHandler::new(
@@ -482,6 +789,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
                         buffer_pool,
                         rng,
                         me_pool,
+                        ip_tracker,
                     )
                     .run()
                     .await
197  src/metrics.rs  Normal file
@@ -0,0 +1,197 @@
use std::convert::Infallible;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;

use http_body_util::Full;
use hyper::body::Bytes;
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Request, Response, StatusCode};
use tokio::net::TcpListener;
use tracing::{info, warn, debug};

use crate::stats::Stats;

pub async fn serve(port: u16, stats: Arc<Stats>, whitelist: Vec<IpAddr>) {
    let addr = SocketAddr::from(([0, 0, 0, 0], port));
    let listener = match TcpListener::bind(addr).await {
        Ok(l) => l,
        Err(e) => {
            warn!(error = %e, "Failed to bind metrics on {}", addr);
            return;
        }
    };
    info!("Metrics endpoint: http://{}/metrics", addr);

    loop {
        let (stream, peer) = match listener.accept().await {
            Ok(v) => v,
            Err(e) => {
                warn!(error = %e, "Metrics accept error");
                continue;
            }
        };

        if !whitelist.is_empty() && !whitelist.contains(&peer.ip()) {
            debug!(peer = %peer, "Metrics request denied by whitelist");
            continue;
        }

        let stats = stats.clone();
        tokio::spawn(async move {
            let svc = service_fn(move |req| {
                let stats = stats.clone();
                async move { handle(req, &stats) }
            });
            if let Err(e) = http1::Builder::new()
                .serve_connection(hyper_util::rt::TokioIo::new(stream), svc)
                .await
            {
                debug!(error = %e, "Metrics connection error");
            }
        });
    }
}

fn handle(req: Request<hyper::body::Incoming>, stats: &Stats) -> Result<Response<Full<Bytes>>, Infallible> {
    if req.uri().path() != "/metrics" {
        let resp = Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Full::new(Bytes::from("Not Found\n")))
            .unwrap();
        return Ok(resp);
    }

    let body = render_metrics(stats);
    let resp = Response::builder()
        .status(StatusCode::OK)
        .header("content-type", "text/plain; version=0.0.4; charset=utf-8")
        .body(Full::new(Bytes::from(body)))
        .unwrap();
    Ok(resp)
}

fn render_metrics(stats: &Stats) -> String {
    use std::fmt::Write;
    let mut out = String::with_capacity(4096);

    let _ = writeln!(out, "# HELP telemt_uptime_seconds Proxy uptime");
    let _ = writeln!(out, "# TYPE telemt_uptime_seconds gauge");
    let _ = writeln!(out, "telemt_uptime_seconds {:.1}", stats.uptime_secs());

    let _ = writeln!(out, "# HELP telemt_connections_total Total accepted connections");
    let _ = writeln!(out, "# TYPE telemt_connections_total counter");
    let _ = writeln!(out, "telemt_connections_total {}", stats.get_connects_all());

    let _ = writeln!(out, "# HELP telemt_connections_bad_total Bad/rejected connections");
    let _ = writeln!(out, "# TYPE telemt_connections_bad_total counter");
    let _ = writeln!(out, "telemt_connections_bad_total {}", stats.get_connects_bad());

    let _ = writeln!(out, "# HELP telemt_handshake_timeouts_total Handshake timeouts");
    let _ = writeln!(out, "# TYPE telemt_handshake_timeouts_total counter");
    let _ = writeln!(out, "telemt_handshake_timeouts_total {}", stats.get_handshake_timeouts());

    let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections");
    let _ = writeln!(out, "# TYPE telemt_user_connections_total counter");
    let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections");
    let _ = writeln!(out, "# TYPE telemt_user_connections_current gauge");
    let _ = writeln!(out, "# HELP telemt_user_octets_from_client Per-user bytes received");
    let _ = writeln!(out, "# TYPE telemt_user_octets_from_client counter");
    let _ = writeln!(out, "# HELP telemt_user_octets_to_client Per-user bytes sent");
    let _ = writeln!(out, "# TYPE telemt_user_octets_to_client counter");
    let _ = writeln!(out, "# HELP telemt_user_msgs_from_client Per-user messages received");
    let _ = writeln!(out, "# TYPE telemt_user_msgs_from_client counter");
    let _ = writeln!(out, "# HELP telemt_user_msgs_to_client Per-user messages sent");
    let _ = writeln!(out, "# TYPE telemt_user_msgs_to_client counter");

    for entry in stats.iter_user_stats() {
        let user = entry.key();
        let s = entry.value();
        let _ = writeln!(out, "telemt_user_connections_total{{user=\"{}\"}} {}", user, s.connects.load(std::sync::atomic::Ordering::Relaxed));
        let _ = writeln!(out, "telemt_user_connections_current{{user=\"{}\"}} {}", user, s.curr_connects.load(std::sync::atomic::Ordering::Relaxed));
        let _ = writeln!(out, "telemt_user_octets_from_client{{user=\"{}\"}} {}", user, s.octets_from_client.load(std::sync::atomic::Ordering::Relaxed));
        let _ = writeln!(out, "telemt_user_octets_to_client{{user=\"{}\"}} {}", user, s.octets_to_client.load(std::sync::atomic::Ordering::Relaxed));
        let _ = writeln!(out, "telemt_user_msgs_from_client{{user=\"{}\"}} {}", user, s.msgs_from_client.load(std::sync::atomic::Ordering::Relaxed));
        let _ = writeln!(out, "telemt_user_msgs_to_client{{user=\"{}\"}} {}", user, s.msgs_to_client.load(std::sync::atomic::Ordering::Relaxed));
    }

    out
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_render_metrics_format() {
        let stats = Arc::new(Stats::new());
        stats.increment_connects_all();
        stats.increment_connects_all();
        stats.increment_connects_bad();
        stats.increment_handshake_timeouts();
        stats.increment_user_connects("alice");
        stats.increment_user_curr_connects("alice");
        stats.add_user_octets_from("alice", 1024);
        stats.add_user_octets_to("alice", 2048);
        stats.increment_user_msgs_from("alice");
        stats.increment_user_msgs_to("alice");
        stats.increment_user_msgs_to("alice");

        let output = render_metrics(&stats);

        assert!(output.contains("telemt_connections_total 2"));
        assert!(output.contains("telemt_connections_bad_total 1"));
        assert!(output.contains("telemt_handshake_timeouts_total 1"));
        assert!(output.contains("telemt_user_connections_total{user=\"alice\"} 1"));
        assert!(output.contains("telemt_user_connections_current{user=\"alice\"} 1"));
        assert!(output.contains("telemt_user_octets_from_client{user=\"alice\"} 1024"));
        assert!(output.contains("telemt_user_octets_to_client{user=\"alice\"} 2048"));
        assert!(output.contains("telemt_user_msgs_from_client{user=\"alice\"} 1"));
        assert!(output.contains("telemt_user_msgs_to_client{user=\"alice\"} 2"));
    }

    #[test]
    fn test_render_empty_stats() {
        let stats = Stats::new();
        let output = render_metrics(&stats);
        assert!(output.contains("telemt_connections_total 0"));
        assert!(output.contains("telemt_connections_bad_total 0"));
        assert!(output.contains("telemt_handshake_timeouts_total 0"));
        assert!(!output.contains("user="));
    }

    #[test]
    fn test_render_has_type_annotations() {
        let stats = Stats::new();
        let output = render_metrics(&stats);
        assert!(output.contains("# TYPE telemt_uptime_seconds gauge"));
        assert!(output.contains("# TYPE telemt_connections_total counter"));
        assert!(output.contains("# TYPE telemt_connections_bad_total counter"));
        assert!(output.contains("# TYPE telemt_handshake_timeouts_total counter"));
    }

    #[tokio::test]
    async fn test_endpoint_integration() {
        let stats = Arc::new(Stats::new());
        stats.increment_connects_all();
        stats.increment_connects_all();
        stats.increment_connects_all();

        let port = 19091u16;
        let s = stats.clone();
        tokio::spawn(async move {
            serve(port, s, vec![]).await;
        });
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;

        let resp = reqwest::get(format!("http://127.0.0.1:{}/metrics", port))
            .await.unwrap();
        assert_eq!(resp.status(), 200);
        let body = resp.text().await.unwrap();
        assert!(body.contains("telemt_connections_total 3"));

        let resp404 = reqwest::get(format!("http://127.0.0.1:{}/other", port))
            .await.unwrap();
        assert_eq!(resp404.status(), 404);
    }
}
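Not part of the diff: for orientation, a scrape of /metrics rendered by render_metrics above would look roughly like this (values are hypothetical, per-user lines appear only for users with recorded stats):

# HELP telemt_uptime_seconds Proxy uptime
# TYPE telemt_uptime_seconds gauge
telemt_uptime_seconds 3600.0
# HELP telemt_connections_total Total accepted connections
# TYPE telemt_connections_total counter
telemt_connections_total 42
telemt_user_connections_current{user="alice"} 1
telemt_user_octets_from_client{user="alice"} 1024
telemt_user_octets_to_client{user="alice"} 2048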
4  src/network/mod.rs  Normal file
@@ -0,0 +1,4 @@
pub mod probe;
pub mod stun;

pub use stun::IpFamily;
231  src/network/probe.rs  Normal file
@@ -0,0 +1,231 @@
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};

use tracing::{info, warn};

use crate::config::NetworkConfig;
use crate::error::Result;
use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily};

#[derive(Debug, Clone, Default)]
pub struct NetworkProbe {
    pub detected_ipv4: Option<Ipv4Addr>,
    pub detected_ipv6: Option<Ipv6Addr>,
    pub reflected_ipv4: Option<SocketAddr>,
    pub reflected_ipv6: Option<SocketAddr>,
    pub ipv4_is_bogon: bool,
    pub ipv6_is_bogon: bool,
    pub ipv4_nat_detected: bool,
    pub ipv6_nat_detected: bool,
    pub ipv4_usable: bool,
    pub ipv6_usable: bool,
}

#[derive(Debug, Clone, Default)]
pub struct NetworkDecision {
    pub ipv4_dc: bool,
    pub ipv6_dc: bool,
    pub ipv4_me: bool,
    pub ipv6_me: bool,
    pub effective_prefer: u8,
    pub effective_multipath: bool,
}

impl NetworkDecision {
    pub fn prefer_ipv6(&self) -> bool {
        self.effective_prefer == 6
    }

    pub fn me_families(&self) -> Vec<IpFamily> {
        let mut res = Vec::new();
        if self.ipv4_me {
            res.push(IpFamily::V4);
        }
        if self.ipv6_me {
            res.push(IpFamily::V6);
        }
        res
    }
}

pub async fn run_probe(config: &NetworkConfig, stun_addr: Option<String>, nat_probe: bool) -> Result<NetworkProbe> {
    let mut probe = NetworkProbe::default();

    probe.detected_ipv4 = detect_local_ip_v4();
    probe.detected_ipv6 = detect_local_ip_v6();

    probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false);
    probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false);

    let stun_server = stun_addr.unwrap_or_else(|| "stun.l.google.com:19302".to_string());
    let stun_res = if nat_probe {
        match stun_probe_dual(&stun_server).await {
            Ok(res) => res,
            Err(e) => {
                warn!(error = %e, "STUN probe failed, continuing without reflection");
                DualStunResult::default()
            }
        }
    } else {
        DualStunResult::default()
    };
    probe.reflected_ipv4 = stun_res.v4.map(|r| r.reflected_addr);
    probe.reflected_ipv6 = stun_res.v6.map(|r| r.reflected_addr);

    probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
        (Some(det), Some(reflected)) => det != reflected.ip(),
        _ => false,
    };
    probe.ipv6_nat_detected = match (probe.detected_ipv6, probe.reflected_ipv6) {
        (Some(det), Some(reflected)) => det != reflected.ip(),
        _ => false,
    };

    probe.ipv4_usable = config.ipv4
        && probe.detected_ipv4.is_some()
        && (!probe.ipv4_is_bogon || probe.reflected_ipv4.map(|r| !is_bogon(r.ip())).unwrap_or(false));

    let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
    probe.ipv6_usable = ipv6_enabled
        && probe.detected_ipv6.is_some()
        && (!probe.ipv6_is_bogon || probe.reflected_ipv6.map(|r| !is_bogon(r.ip())).unwrap_or(false));

    Ok(probe)
}

pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision {
    let mut decision = NetworkDecision::default();

    decision.ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
    decision.ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();

    decision.ipv4_me = config.ipv4
        && probe.detected_ipv4.is_some()
        && (!probe.ipv4_is_bogon || probe.reflected_ipv4.is_some());

    let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
    decision.ipv6_me = ipv6_enabled
        && probe.detected_ipv6.is_some()
        && (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some());

    decision.effective_prefer = match config.prefer {
        6 if decision.ipv6_me || decision.ipv6_dc => 6,
        4 if decision.ipv4_me || decision.ipv4_dc => 4,
        6 => {
            warn!("prefer=6 requested but IPv6 unavailable; falling back to IPv4");
            4
        }
        _ => 4,
    };

    let me_families = decision.ipv4_me as u8 + decision.ipv6_me as u8;
    decision.effective_multipath = config.multipath && me_families >= 2;

    decision
}
|
|
||||||
|
fn detect_local_ip_v4() -> Option<Ipv4Addr> {
|
||||||
|
let socket = UdpSocket::bind("0.0.0.0:0").ok()?;
|
||||||
|
socket.connect("8.8.8.8:80").ok()?;
|
||||||
|
match socket.local_addr().ok()?.ip() {
|
||||||
|
IpAddr::V4(v4) => Some(v4),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn detect_local_ip_v6() -> Option<Ipv6Addr> {
|
||||||
|
let socket = UdpSocket::bind("[::]:0").ok()?;
|
||||||
|
socket.connect("[2001:4860:4860::8888]:80").ok()?;
|
||||||
|
match socket.local_addr().ok()?.ip() {
|
||||||
|
IpAddr::V6(v6) => Some(v6),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn is_bogon(ip: IpAddr) -> bool {
|
||||||
|
match ip {
|
||||||
|
IpAddr::V4(v4) => is_bogon_v4(v4),
|
||||||
|
IpAddr::V6(v6) => is_bogon_v6(v6),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn is_bogon_v4(ip: Ipv4Addr) -> bool {
|
||||||
|
let octets = ip.octets();
|
||||||
|
if ip.is_private() || ip.is_loopback() || ip.is_link_local() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if octets[0] == 0 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if octets[0] == 100 && (octets[1] & 0xC0) == 64 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if octets[0] == 192 && octets[1] == 0 && octets[2] == 0 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if octets[0] == 192 && octets[1] == 0 && octets[2] == 2 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if octets[0] == 198 && (octets[1] & 0xFE) == 18 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if octets[0] == 198 && octets[1] == 51 && octets[2] == 100 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if octets[0] == 203 && octets[1] == 0 && octets[2] == 113 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if ip.is_multicast() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if octets[0] >= 240 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if ip.is_broadcast() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn is_bogon_v6(ip: Ipv6Addr) -> bool {
|
||||||
|
if ip.is_unspecified() || ip.is_loopback() || ip.is_unique_local() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
let segs = ip.segments();
|
||||||
|
if (segs[0] & 0xFFC0) == 0xFE80 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if segs[0..5] == [0, 0, 0, 0, 0] && segs[5] == 0xFFFF {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if segs[0] == 0x0100 && segs[1..4] == [0, 0, 0] {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if segs[0] == 0x2001 && segs[1] == 0x0db8 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if segs[0] == 0x2002 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if ip.is_multicast() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn log_probe_result(probe: &NetworkProbe, decision: &NetworkDecision) {
|
||||||
|
info!(
|
||||||
|
ipv4 = probe.detected_ipv4.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
|
||||||
|
ipv6 = probe.detected_ipv6.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
|
||||||
|
reflected_v4 = probe.reflected_ipv4.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
|
||||||
|
reflected_v6 = probe.reflected_ipv6.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
|
||||||
|
ipv4_bogon = probe.ipv4_is_bogon,
|
||||||
|
ipv6_bogon = probe.ipv6_is_bogon,
|
||||||
|
ipv4_me = decision.ipv4_me,
|
||||||
|
ipv6_me = decision.ipv6_me,
|
||||||
|
ipv4_dc = decision.ipv4_dc,
|
||||||
|
ipv6_dc = decision.ipv6_dc,
|
||||||
|
prefer = decision.effective_prefer,
|
||||||
|
multipath = decision.effective_multipath,
|
||||||
|
"Network capabilities resolved"
|
||||||
|
);
|
||||||
|
}
|
||||||
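Reviewer note: a minimal sketch of how the two entry points above are meant to compose at startup. The `NetworkConfig` fields (`ipv4`, `ipv6`, `prefer`, `multipath`) are taken from the code in this file; the surrounding startup wiring is assumed and not shown in this diff.

```rust
use crate::config::NetworkConfig;
use crate::error::Result;
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};

// Probe once at startup, then derive the capability decision that the rest of
// the proxy (DC dialing, middle-proxy pool) consumes.
async fn resolve_network(config: &NetworkConfig) -> Result<()> {
    let probe = run_probe(config, None, true).await?; // default STUN server, NAT probe enabled
    let decision = decide_network_capabilities(config, &probe);
    log_probe_result(&probe, &decision);

    if decision.prefer_ipv6() {
        // e.g. try the IPv6 datacenter list before falling back to IPv4
    }
    Ok(())
}
```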
203 src/network/stun.rs Normal file
@@ -0,0 +1,203 @@
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};

use tokio::net::{lookup_host, UdpSocket};
use tokio::time::{timeout, Duration, sleep};

use crate::error::{ProxyError, Result};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum IpFamily {
    V4,
    V6,
}

#[derive(Debug, Clone, Copy)]
pub struct StunProbeResult {
    pub local_addr: SocketAddr,
    pub reflected_addr: SocketAddr,
    pub family: IpFamily,
}

#[derive(Debug, Default, Clone)]
pub struct DualStunResult {
    pub v4: Option<StunProbeResult>,
    pub v6: Option<StunProbeResult>,
}

pub async fn stun_probe_dual(stun_addr: &str) -> Result<DualStunResult> {
    let (v4, v6) = tokio::join!(
        stun_probe_family(stun_addr, IpFamily::V4),
        stun_probe_family(stun_addr, IpFamily::V6),
    );

    Ok(DualStunResult {
        v4: v4?,
        v6: v6?,
    })
}

pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result<Option<StunProbeResult>> {
    use rand::RngCore;

    let bind_addr = match family {
        IpFamily::V4 => "0.0.0.0:0",
        IpFamily::V6 => "[::]:0",
    };

    let socket = UdpSocket::bind(bind_addr)
        .await
        .map_err(|e| ProxyError::Proxy(format!("STUN bind failed: {e}")))?;

    let target_addr = resolve_stun_addr(stun_addr, family).await?;
    if let Some(addr) = target_addr {
        socket
            .connect(addr)
            .await
            .map_err(|e| ProxyError::Proxy(format!("STUN connect failed: {e}")))?;
    } else {
        return Ok(None);
    }

    let mut req = [0u8; 20];
    req[0..2].copy_from_slice(&0x0001u16.to_be_bytes()); // Binding Request
    req[2..4].copy_from_slice(&0u16.to_be_bytes()); // length
    req[4..8].copy_from_slice(&0x2112A442u32.to_be_bytes()); // magic cookie
    rand::rng().fill_bytes(&mut req[8..20]); // transaction ID

    let mut buf = [0u8; 256];
    let mut attempt = 0;
    let mut backoff = Duration::from_secs(1);
    loop {
        socket
            .send(&req)
            .await
            .map_err(|e| ProxyError::Proxy(format!("STUN send failed: {e}")))?;

        let recv_res = timeout(Duration::from_secs(3), socket.recv(&mut buf)).await;
        let n = match recv_res {
            Ok(Ok(n)) => n,
            Ok(Err(e)) => return Err(ProxyError::Proxy(format!("STUN recv failed: {e}"))),
            Err(_) => {
                attempt += 1;
                if attempt >= 3 {
                    return Ok(None);
                }
                sleep(backoff).await;
                backoff *= 2;
                continue;
            }
        };

        if n < 20 {
            return Ok(None);
        }

        let magic = 0x2112A442u32.to_be_bytes();
        let txid = &req[8..20];
        let mut idx = 20;
        while idx + 4 <= n {
            let atype = u16::from_be_bytes(buf[idx..idx + 2].try_into().unwrap());
            let alen = u16::from_be_bytes(buf[idx + 2..idx + 4].try_into().unwrap()) as usize;
            idx += 4;
            if idx + alen > n {
                break;
            }

            match atype {
                0x0020 /* XOR-MAPPED-ADDRESS */ | 0x0001 /* MAPPED-ADDRESS */ => {
                    if alen < 8 {
                        break;
                    }
                    let family_byte = buf[idx + 1];
                    let port_bytes = [buf[idx + 2], buf[idx + 3]];
                    let len_check = match family_byte {
                        0x01 => 4,
                        0x02 => 16,
                        _ => 0,
                    };
                    if len_check == 0 || alen < 4 + len_check {
                        break;
                    }

                    let raw_ip = &buf[idx + 4..idx + 4 + len_check];
                    let mut port = u16::from_be_bytes(port_bytes);

                    let reflected_ip = if atype == 0x0020 {
                        port ^= ((magic[0] as u16) << 8) | magic[1] as u16;
                        match family_byte {
                            0x01 => {
                                let ip = [
                                    raw_ip[0] ^ magic[0],
                                    raw_ip[1] ^ magic[1],
                                    raw_ip[2] ^ magic[2],
                                    raw_ip[3] ^ magic[3],
                                ];
                                IpAddr::V4(Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]))
                            }
                            0x02 => {
                                let mut ip = [0u8; 16];
                                let xor_key = [magic.as_slice(), txid].concat();
                                for (i, b) in raw_ip.iter().enumerate().take(16) {
                                    ip[i] = *b ^ xor_key[i];
                                }
                                IpAddr::V6(Ipv6Addr::from(ip))
                            }
                            _ => {
                                idx += (alen + 3) & !3;
                                continue;
                            }
                        }
                    } else {
                        match family_byte {
                            0x01 => IpAddr::V4(Ipv4Addr::new(raw_ip[0], raw_ip[1], raw_ip[2], raw_ip[3])),
                            0x02 => IpAddr::V6(Ipv6Addr::from(<[u8; 16]>::try_from(raw_ip).unwrap())),
                            _ => {
                                idx += (alen + 3) & !3;
                                continue;
                            }
                        }
                    };

                    let reflected_addr = SocketAddr::new(reflected_ip, port);
                    let local_addr = socket
                        .local_addr()
                        .map_err(|e| ProxyError::Proxy(format!("STUN local_addr failed: {e}")))?;

                    return Ok(Some(StunProbeResult {
                        local_addr,
                        reflected_addr,
                        family,
                    }));
                }
                _ => {}
            }

            idx += (alen + 3) & !3;
        }

    }

    Ok(None)
}

async fn resolve_stun_addr(stun_addr: &str, family: IpFamily) -> Result<Option<SocketAddr>> {
    if let Ok(addr) = stun_addr.parse::<SocketAddr>() {
        return Ok(match (addr.is_ipv4(), family) {
            (true, IpFamily::V4) | (false, IpFamily::V6) => Some(addr),
            _ => None,
        });
    }

    let addrs = lookup_host(stun_addr)
        .await
        .map_err(|e| ProxyError::Proxy(format!("STUN resolve failed: {e}")))?;

    let target = addrs
        .filter(|a| match (a.is_ipv4(), family) {
            (true, IpFamily::V4) => true,
            (false, IpFamily::V6) => true,
            _ => false,
        })
        .next();
    Ok(target)
}
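Reviewer note: the attribute parser above undoes the XOR-MAPPED-ADDRESS obfuscation by XORing the port with the top 16 bits of the magic cookie and the IPv4 octets with the full cookie (RFC 5389). A self-contained check of that arithmetic, using illustrative values only:

```rust
fn main() {
    let magic = 0x2112A442u32.to_be_bytes(); // [0x21, 0x12, 0xA4, 0x42]

    // Port 3478 as it appears on the wire (X-Port), and back again.
    let real_port: u16 = 3478;
    let x_port = real_port ^ 0x2112;
    assert_eq!(x_port ^ (((magic[0] as u16) << 8) | magic[1] as u16), real_port);

    // X-Address for 192.0.2.1: each octet XORed with the cookie byte at the same index.
    let real_ip = [192u8, 0, 2, 1];
    let x_addr: Vec<u8> = real_ip.iter().zip(magic.iter()).map(|(a, m)| a ^ m).collect();
    let decoded: Vec<u8> = x_addr.iter().zip(magic.iter()).map(|(a, m)| a ^ m).collect();
    assert_eq!(decoded, real_ip);
    println!("XOR-MAPPED-ADDRESS round-trip ok");
}
```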
@@ -160,6 +160,12 @@ pub fn prepare_tg_nonce(
 }

 /// Encrypt the outgoing nonce for Telegram
+/// Legacy helper — **do not use**.
+/// WARNING: logic diverges from Python/C reference (SHA256 of 48 bytes, IV from head).
+/// Kept only to avoid breaking external callers; prefer `encrypt_tg_nonce_with_ciphers`.
+#[deprecated(
+    note = "Incorrect MTProto obfuscation KDF; use proxy::handshake::encrypt_tg_nonce_with_ciphers"
+)]
 pub fn encrypt_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
     let key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
     let enc_key = sha256(key_iv);
@@ -208,4 +214,4 @@ mod tests {
         assert!(is_valid_nonce(&nonce));
         assert_eq!(nonce.len(), HANDSHAKE_LEN);
     }
 }
@@ -8,6 +8,8 @@ use crate::crypto::{sha256_hmac, SecureRandom};
 use crate::error::{ProxyError, Result};
 use super::constants::*;
 use std::time::{SystemTime, UNIX_EPOCH};
+use num_bigint::BigUint;
+use num_traits::One;

 // ============= Public Constants =============

@@ -311,13 +313,27 @@ pub fn validate_tls_handshake(
     None
 }

+fn curve25519_prime() -> BigUint {
+    (BigUint::one() << 255) - BigUint::from(19u32)
+}
+
 /// Generate a fake X25519 public key for TLS
 ///
-/// This generates random bytes that look like a valid X25519 public key.
-/// Since we're not doing real TLS, the actual cryptographic properties don't matter.
+/// Produces a quadratic residue mod p = 2^255 - 19 by computing n² mod p,
+/// which matches Python/C behavior and avoids DPI fingerprinting.
 pub fn gen_fake_x25519_key(rng: &SecureRandom) -> [u8; 32] {
-    let bytes = rng.bytes(32);
-    bytes.try_into().unwrap()
+    let mut n_bytes = [0u8; 32];
+    n_bytes.copy_from_slice(&rng.bytes(32));
+
+    let n = BigUint::from_bytes_le(&n_bytes);
+    let p = curve25519_prime();
+    let pk = (&n * &n) % &p;
+
+    let mut out = pk.to_bytes_le();
+    out.resize(32, 0);
+    let mut result = [0u8; 32];
+    result.copy_from_slice(&out[..32]);
+    result
 }

 /// Build TLS ServerHello response
@@ -498,6 +514,17 @@ mod tests {
         assert_eq!(key2.len(), 32);
         assert_ne!(key1, key2); // Should be random
     }
+
+    #[test]
+    fn test_fake_x25519_key_is_quadratic_residue() {
+        let rng = SecureRandom::new();
+        let key = gen_fake_x25519_key(&rng);
+        let p = curve25519_prime();
+        let k_num = BigUint::from_bytes_le(&key);
+        let exponent = (&p - BigUint::one()) >> 1;
+        let legendre = k_num.modpow(&exponent, &p);
+        assert_eq!(legendre, BigUint::one());
+    }

     #[test]
     fn test_tls_extension_builder() {
@@ -641,4 +668,4 @@ mod tests {
         // Should return None (no match) but not panic
         assert!(result.is_none());
     }
 }
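Reviewer note: the new test relies on Euler's criterion, which is why squaring a random value modulo p is enough to make the fake key pass a quadratic-residue check. Standard number theory, restated for context:

p = 2^{255} - 19, \qquad k \equiv n^{2} \pmod{p}
\left(\frac{k}{p}\right) \equiv k^{(p-1)/2} \equiv n^{\,p-1} \equiv 1 \pmod{p}

The only exception is n \equiv 0 \pmod{p}, where the power is 0 rather than 1; with 32 random bytes the chance of hitting it is roughly 2^{-255}, so the test can ignore it.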
@@ -1,6 +1,8 @@
 //! Client Handler

+use std::future::Future;
 use std::net::SocketAddr;
+use std::pin::Pin;
 use std::sync::Arc;
 use std::time::Duration;
 use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
@@ -8,9 +10,21 @@ use tokio::net::TcpStream;
 use tokio::time::timeout;
 use tracing::{debug, warn};

+/// Post-handshake future (relay phase, runs outside handshake timeout)
+type PostHandshakeFuture = Pin<Box<dyn Future<Output = Result<()>> + Send>>;
+
+/// Result of the handshake phase
+enum HandshakeOutcome {
+    /// Handshake succeeded, relay work to do (outside timeout)
+    NeedsRelay(PostHandshakeFuture),
+    /// Already fully handled (bad client masking, etc.)
+    Handled,
+}
+
 use crate::config::ProxyConfig;
 use crate::crypto::SecureRandom;
 use crate::error::{HandshakeResult, ProxyError, Result};
+use crate::ip_tracker::UserIpTracker;
 use crate::protocol::constants::*;
 use crate::protocol::tls;
 use crate::stats::{ReplayChecker, Stats};
@@ -23,6 +37,160 @@ use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle
 use crate::proxy::masking::handle_bad_client;
 use crate::proxy::middle_relay::handle_via_middle_proxy;

+pub async fn handle_client_stream<S>(
+    mut stream: S,
+    peer: SocketAddr,
+    config: Arc<ProxyConfig>,
+    stats: Arc<Stats>,
+    upstream_manager: Arc<UpstreamManager>,
+    replay_checker: Arc<ReplayChecker>,
+    buffer_pool: Arc<BufferPool>,
+    rng: Arc<SecureRandom>,
+    me_pool: Option<Arc<MePool>>,
+    ip_tracker: Arc<UserIpTracker>,
+) -> Result<()>
+where
+    S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+{
+    stats.increment_connects_all();
+    debug!(peer = %peer, "New connection (generic stream)");
+
+    let handshake_timeout = Duration::from_secs(config.timeouts.client_handshake);
+    let stats_for_timeout = stats.clone();
+
+    // For non-TCP streams, use a synthetic local address
+    let local_addr: SocketAddr = format!("0.0.0.0:{}", config.server.port)
+        .parse()
+        .unwrap_or_else(|_| "0.0.0.0:443".parse().unwrap());
+
+    // Phase 1: handshake (with timeout)
+    let outcome = match timeout(handshake_timeout, async {
+        let mut first_bytes = [0u8; 5];
+        stream.read_exact(&mut first_bytes).await?;
+
+        let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
+        debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
+
+        if is_tls {
+            let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
+
+            if tls_len < 512 {
+                debug!(peer = %peer, tls_len = tls_len, "TLS handshake too short");
+                stats.increment_connects_bad();
+                let (reader, writer) = tokio::io::split(stream);
+                handle_bad_client(reader, writer, &first_bytes, &config).await;
+                return Ok(HandshakeOutcome::Handled);
+            }
+
+            let mut handshake = vec![0u8; 5 + tls_len];
+            handshake[..5].copy_from_slice(&first_bytes);
+            stream.read_exact(&mut handshake[5..]).await?;
+
+            let (read_half, write_half) = tokio::io::split(stream);
+
+            let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake(
+                &handshake, read_half, write_half, peer,
+                &config, &replay_checker, &rng,
+            ).await {
+                HandshakeResult::Success(result) => result,
+                HandshakeResult::BadClient { reader, writer } => {
+                    stats.increment_connects_bad();
+                    handle_bad_client(reader, writer, &handshake, &config).await;
+                    return Ok(HandshakeOutcome::Handled);
+                }
+                HandshakeResult::Error(e) => return Err(e),
+            };
+
+            debug!(peer = %peer, "Reading MTProto handshake through TLS");
+            let mtproto_data = tls_reader.read_exact(HANDSHAKE_LEN).await?;
+            let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into()
+                .map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
+
+            let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
+                &mtproto_handshake, tls_reader, tls_writer, peer,
+                &config, &replay_checker, true,
+            ).await {
+                HandshakeResult::Success(result) => result,
+                HandshakeResult::BadClient { reader: _, writer: _ } => {
+                    stats.increment_connects_bad();
+                    debug!(peer = %peer, "Valid TLS but invalid MTProto handshake");
+                    return Ok(HandshakeOutcome::Handled);
+                }
+                HandshakeResult::Error(e) => return Err(e),
+            };
+
+            Ok(HandshakeOutcome::NeedsRelay(Box::pin(
+                RunningClientHandler::handle_authenticated_static(
+                    crypto_reader, crypto_writer, success,
+                    upstream_manager, stats, config, buffer_pool, rng, me_pool,
+                    local_addr, peer, ip_tracker.clone(),
+                ),
+            )))
+        } else {
+            if !config.general.modes.classic && !config.general.modes.secure {
+                debug!(peer = %peer, "Non-TLS modes disabled");
+                stats.increment_connects_bad();
+                let (reader, writer) = tokio::io::split(stream);
+                handle_bad_client(reader, writer, &first_bytes, &config).await;
+                return Ok(HandshakeOutcome::Handled);
+            }
+
+            let mut handshake = [0u8; HANDSHAKE_LEN];
+            handshake[..5].copy_from_slice(&first_bytes);
+            stream.read_exact(&mut handshake[5..]).await?;
+
+            let (read_half, write_half) = tokio::io::split(stream);
+
+            let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
+                &handshake, read_half, write_half, peer,
+                &config, &replay_checker, false,
+            ).await {
+                HandshakeResult::Success(result) => result,
+                HandshakeResult::BadClient { reader, writer } => {
+                    stats.increment_connects_bad();
+                    handle_bad_client(reader, writer, &handshake, &config).await;
+                    return Ok(HandshakeOutcome::Handled);
+                }
+                HandshakeResult::Error(e) => return Err(e),
+            };
+
+            Ok(HandshakeOutcome::NeedsRelay(Box::pin(
+                RunningClientHandler::handle_authenticated_static(
+                    crypto_reader,
+                    crypto_writer,
+                    success,
+                    upstream_manager,
+                    stats,
+                    config,
+                    buffer_pool,
+                    rng,
+                    me_pool,
+                    local_addr,
+                    peer,
+                    ip_tracker.clone(),
+                ),
+            )))
+        }
+    }).await {
+        Ok(Ok(outcome)) => outcome,
+        Ok(Err(e)) => {
+            debug!(peer = %peer, error = %e, "Handshake failed");
+            return Err(e);
+        }
+        Err(_) => {
+            stats_for_timeout.increment_handshake_timeouts();
+            debug!(peer = %peer, "Handshake timeout");
+            return Err(ProxyError::TgHandshakeTimeout);
+        }
+    };
+
+    // Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
+    match outcome {
+        HandshakeOutcome::NeedsRelay(fut) => fut.await,
+        HandshakeOutcome::Handled => Ok(()),
+    }
+}
+
 pub struct ClientHandler;

 pub struct RunningClientHandler {
@@ -35,6 +203,7 @@ pub struct RunningClientHandler {
     buffer_pool: Arc<BufferPool>,
     rng: Arc<SecureRandom>,
     me_pool: Option<Arc<MePool>>,
+    ip_tracker: Arc<UserIpTracker>,
 }

 impl ClientHandler {
@@ -48,6 +217,7 @@ impl ClientHandler {
         buffer_pool: Arc<BufferPool>,
         rng: Arc<SecureRandom>,
         me_pool: Option<Arc<MePool>>,
+        ip_tracker: Arc<UserIpTracker>,
     ) -> RunningClientHandler {
         RunningClientHandler {
             stream,
@@ -59,6 +229,7 @@ impl ClientHandler {
             buffer_pool,
             rng,
             me_pool,
+            ip_tracker,
         }
     }
 }
@@ -68,6 +239,7 @@ impl RunningClientHandler {
         self.stats.increment_connects_all();

         let peer = self.peer;
+        let ip_tracker = self.ip_tracker.clone();
         debug!(peer = %peer, "New connection");

         if let Err(e) = configure_client_socket(
@@ -81,31 +253,34 @@ impl RunningClientHandler {
         let handshake_timeout = Duration::from_secs(self.config.timeouts.client_handshake);
         let stats = self.stats.clone();

-        let result = timeout(handshake_timeout, self.do_handshake()).await;
-
-        match result {
-            Ok(Ok(())) => {
-                debug!(peer = %peer, "Connection handled successfully");
-                Ok(())
-            }
+        // Phase 1: handshake (with timeout)
+        let outcome = match timeout(handshake_timeout, self.do_handshake()).await {
+            Ok(Ok(outcome)) => outcome,
             Ok(Err(e)) => {
                 debug!(peer = %peer, error = %e, "Handshake failed");
-                Err(e)
+                return Err(e);
             }
             Err(_) => {
                 stats.increment_handshake_timeouts();
                 debug!(peer = %peer, "Handshake timeout");
-                Err(ProxyError::TgHandshakeTimeout)
+                return Err(ProxyError::TgHandshakeTimeout);
             }
+        };
+
+        // Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
+        match outcome {
+            HandshakeOutcome::NeedsRelay(fut) => fut.await,
+            HandshakeOutcome::Handled => Ok(()),
         }
     }

-    async fn do_handshake(mut self) -> Result<()> {
+    async fn do_handshake(mut self) -> Result<HandshakeOutcome> {
         let mut first_bytes = [0u8; 5];
         self.stream.read_exact(&mut first_bytes).await?;

         let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
         let peer = self.peer;
+        let ip_tracker = self.ip_tracker.clone();

         debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");

@@ -116,8 +291,9 @@ impl RunningClientHandler {
             }
         }

-    async fn handle_tls_client(mut self, first_bytes: [u8; 5]) -> Result<()> {
+    async fn handle_tls_client(mut self, first_bytes: [u8; 5]) -> Result<HandshakeOutcome> {
         let peer = self.peer;
+        let ip_tracker = self.ip_tracker.clone();

         let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;

@@ -128,7 +304,7 @@ impl RunningClientHandler {
             self.stats.increment_connects_bad();
             let (reader, writer) = self.stream.into_split();
             handle_bad_client(reader, writer, &first_bytes, &self.config).await;
-            return Ok(());
+            return Ok(HandshakeOutcome::Handled);
         }

         let mut handshake = vec![0u8; 5 + tls_len];
@@ -158,7 +334,7 @@ impl RunningClientHandler {
             HandshakeResult::BadClient { reader, writer } => {
                 stats.increment_connects_bad();
                 handle_bad_client(reader, writer, &handshake, &config).await;
-                return Ok(());
+                return Ok(HandshakeOutcome::Handled);
             }
             HandshakeResult::Error(e) => return Err(e),
         };
@@ -187,35 +363,39 @@ impl RunningClientHandler {
             } => {
                 stats.increment_connects_bad();
                 debug!(peer = %peer, "Valid TLS but invalid MTProto handshake");
-                return Ok(());
+                return Ok(HandshakeOutcome::Handled);
             }
             HandshakeResult::Error(e) => return Err(e),
         };

-        Self::handle_authenticated_static(
-            crypto_reader,
-            crypto_writer,
-            success,
-            self.upstream_manager,
-            self.stats,
-            self.config,
-            buffer_pool,
-            self.rng,
-            self.me_pool,
-            local_addr,
-        )
-        .await
+        Ok(HandshakeOutcome::NeedsRelay(Box::pin(
+            Self::handle_authenticated_static(
+                crypto_reader,
+                crypto_writer,
+                success,
+                self.upstream_manager,
+                self.stats,
+                self.config,
+                buffer_pool,
+                self.rng,
+                self.me_pool,
+                local_addr,
+                peer,
+                self.ip_tracker,
+            ),
+        )))
     }

-    async fn handle_direct_client(mut self, first_bytes: [u8; 5]) -> Result<()> {
+    async fn handle_direct_client(mut self, first_bytes: [u8; 5]) -> Result<HandshakeOutcome> {
         let peer = self.peer;
+        let ip_tracker = self.ip_tracker.clone();

         if !self.config.general.modes.classic && !self.config.general.modes.secure {
             debug!(peer = %peer, "Non-TLS modes disabled");
             self.stats.increment_connects_bad();
             let (reader, writer) = self.stream.into_split();
             handle_bad_client(reader, writer, &first_bytes, &self.config).await;
-            return Ok(());
+            return Ok(HandshakeOutcome::Handled);
         }

         let mut handshake = [0u8; HANDSHAKE_LEN];
@@ -245,24 +425,27 @@ impl RunningClientHandler {
             HandshakeResult::BadClient { reader, writer } => {
                 stats.increment_connects_bad();
                 handle_bad_client(reader, writer, &handshake, &config).await;
-                return Ok(());
+                return Ok(HandshakeOutcome::Handled);
             }
             HandshakeResult::Error(e) => return Err(e),
         };

-        Self::handle_authenticated_static(
-            crypto_reader,
-            crypto_writer,
-            success,
-            self.upstream_manager,
-            self.stats,
-            self.config,
-            buffer_pool,
-            self.rng,
-            self.me_pool,
-            local_addr,
-        )
-        .await
+        Ok(HandshakeOutcome::NeedsRelay(Box::pin(
+            Self::handle_authenticated_static(
+                crypto_reader,
+                crypto_writer,
+                success,
+                self.upstream_manager,
+                self.stats,
+                self.config,
+                buffer_pool,
+                self.rng,
+                self.me_pool,
+                local_addr,
+                peer,
+                self.ip_tracker,
+            ),
+        )))
     }

     /// Main dispatch after successful handshake.
@@ -280,6 +463,8 @@ impl RunningClientHandler {
         rng: Arc<SecureRandom>,
         me_pool: Option<Arc<MePool>>,
         local_addr: SocketAddr,
+        peer_addr: SocketAddr,
+        ip_tracker: Arc<UserIpTracker>,
     ) -> Result<()>
     where
         R: AsyncRead + Unpin + Send + 'static,
@@ -287,11 +472,36 @@ impl RunningClientHandler {
     {
         let user = &success.user;

-        if let Err(e) = Self::check_user_limits_static(user, &config, &stats) {
+        if let Err(e) = Self::check_user_limits_static(user, &config, &stats, peer_addr, &ip_tracker).await {
             warn!(user = %user, error = %e, "User limit exceeded");
             return Err(e);
         }

+        // IP cleanup guard: automatically removes the IP when it goes out of scope
+        struct IpCleanupGuard {
+            tracker: Arc<UserIpTracker>,
+            user: String,
+            ip: std::net::IpAddr,
+        }
+
+        impl Drop for IpCleanupGuard {
+            fn drop(&mut self) {
+                let tracker = self.tracker.clone();
+                let user = self.user.clone();
+                let ip = self.ip;
+                tokio::spawn(async move {
+                    tracker.remove_ip(&user, ip).await;
+                    debug!(user = %user, ip = %ip, "IP cleaned up on disconnect");
+                });
+            }
+        }
+
+        let _cleanup = IpCleanupGuard {
+            tracker: ip_tracker,
+            user: user.clone(),
+            ip: peer_addr.ip(),
+        };
+
         // Decide: middle proxy or direct
         if config.general.use_middle_proxy {
             if let Some(ref pool) = me_pool {
@@ -304,6 +514,7 @@ impl RunningClientHandler {
                     config,
                     buffer_pool,
                     local_addr,
+                    rng,
                 )
                 .await;
             }
@@ -324,7 +535,13 @@ impl RunningClientHandler {
             .await
     }

-    fn check_user_limits_static(user: &str, config: &ProxyConfig, stats: &Stats) -> Result<()> {
+    async fn check_user_limits_static(
+        user: &str,
+        config: &ProxyConfig,
+        stats: &Stats,
+        peer_addr: SocketAddr,
+        ip_tracker: &UserIpTracker,
+    ) -> Result<()> {
         if let Some(expiration) = config.access.user_expirations.get(user) {
             if chrono::Utc::now() > *expiration {
                 return Err(ProxyError::UserExpired {
@@ -333,6 +550,19 @@ impl RunningClientHandler {
             }
         }

+        // IP limit check
+        if let Err(reason) = ip_tracker.check_and_add(user, peer_addr.ip()).await {
+            warn!(
+                user = %user,
+                ip = %peer_addr.ip(),
+                reason = %reason,
+                "IP limit exceeded"
+            );
+            return Err(ProxyError::ConnectionLimitExceeded {
+                user: user.to_string(),
+            });
+        }
+
         if let Some(limit) = config.access.user_max_tcp_conns.get(user) {
             if stats.get_user_curr_connects(user) >= *limit as u64 {
                 return Err(ProxyError::ConnectionLimitExceeded {
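Reviewer note: the core of this refactor is that only phase 1 runs under the handshake timeout, while the returned boxed future (the relay) runs without it. A stripped-down illustration of the same shape using plain tokio types; all names here are illustrative, none are from the codebase.

```rust
use std::{future::Future, pin::Pin, time::Duration};
use tokio::time::timeout;

type Relay = Pin<Box<dyn Future<Output = std::io::Result<()>> + Send>>;

enum Outcome {
    NeedsRelay(Relay),
    Handled,
}

async fn serve() -> std::io::Result<()> {
    // Phase 1: only the handshake work runs under the deadline.
    let outcome = timeout(Duration::from_secs(10), async {
        // ... read and validate the handshake here ...
        // Hand back the long-running relay as a boxed future instead of awaiting it.
        let relay: Relay = Box::pin(async { Ok(()) });
        Ok::<_, std::io::Error>(Outcome::NeedsRelay(relay))
    })
    .await
    .map_err(|_| std::io::Error::new(std::io::ErrorKind::TimedOut, "handshake timeout"))??;

    // Phase 2: the relay is awaited with no handshake deadline attached.
    match outcome {
        Outcome::NeedsRelay(fut) => fut.await,
        Outcome::Handled => Ok(()),
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    serve().await
}
```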
@@ -1,3 +1,5 @@
+use std::fs::OpenOptions;
+use std::io::Write;
 use std::net::SocketAddr;
 use std::sync::Arc;

@@ -78,7 +80,8 @@
 }

 fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
-    let datacenters = if config.general.prefer_ipv6 {
+    let prefer_v6 = config.network.prefer == 6 && config.network.ipv6.unwrap_or(true);
+    let datacenters = if prefer_v6 {
         &*TG_DATACENTERS_V6
     } else {
         &*TG_DATACENTERS_V4
@@ -87,17 +90,24 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
     let num_dcs = datacenters.len();

     let dc_key = dc_idx.to_string();
-    if let Some(addr_str) = config.dc_overrides.get(&dc_key) {
-        match addr_str.parse::<SocketAddr>() {
-            Ok(addr) => {
-                debug!(dc_idx = dc_idx, addr = %addr, "Using DC override from config");
-                return Ok(addr);
-            }
-            Err(_) => {
-                warn!(dc_idx = dc_idx, addr_str = %addr_str,
-                    "Invalid DC override address in config, ignoring");
-            }
+    if let Some(addrs) = config.dc_overrides.get(&dc_key) {
+        let mut parsed = Vec::new();
+        for addr_str in addrs {
+            match addr_str.parse::<SocketAddr>() {
+                Ok(addr) => parsed.push(addr),
+                Err(_) => warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring"),
+            }
+        }
+
+        if let Some(addr) = parsed
+            .iter()
+            .find(|a| a.is_ipv6() == prefer_v6)
+            .or_else(|| parsed.first())
+            .copied()
+        {
+            debug!(dc_idx = dc_idx, addr = %addr, count = parsed.len(), "Using DC override from config");
+            return Ok(addr);
         }
     }

     let abs_dc = dc_idx.unsigned_abs() as usize;
@@ -105,6 +115,16 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
         return Ok(SocketAddr::new(datacenters[abs_dc - 1], TG_DATACENTER_PORT));
     }

+    // Unknown DC requested by client without override: log and fall back.
+    if !config.dc_overrides.contains_key(&dc_key) {
+        warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster");
+        if let Some(path) = &config.general.unknown_dc_log_path {
+            if let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path) {
+                let _ = writeln!(file, "dc_idx={dc_idx}");
+            }
+        }
+    }
+
     let default_dc = config.default_dc.unwrap_or(2) as usize;
     let fallback_idx = if default_dc >= 1 && default_dc <= num_dcs {
         default_dc - 1
@@ -139,6 +159,8 @@ async fn do_tg_handshake_static(
         success.dc_idx,
         &success.dec_key,
         success.dec_iv,
+        &success.enc_key,
+        success.enc_iv,
         rng,
         config.general.fast_mode,
     );
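Reviewer note: the override lookup now takes a list per DC and prefers the entry whose address family matches the effective preference, falling back to the first parsable address. The selection rule in isolation; the addresses below are illustrative documentation values, not taken from this diff.

```rust
use std::net::SocketAddr;

// Same rule as in get_dc_addr_static: prefer the matching family, else the first entry.
fn pick_override(addrs: &[SocketAddr], prefer_v6: bool) -> Option<SocketAddr> {
    addrs
        .iter()
        .find(|a| a.is_ipv6() == prefer_v6)
        .or_else(|| addrs.first())
        .copied()
}

fn main() {
    let addrs: Vec<SocketAddr> = vec![
        "203.0.113.10:443".parse().unwrap(),
        "[2001:db8::10]:443".parse().unwrap(),
    ];
    assert!(pick_override(&addrs, true).unwrap().is_ipv6());
    assert!(pick_override(&addrs, false).unwrap().is_ipv4());
    assert_eq!(pick_override(&addrs[..1], true), Some(addrs[0])); // no v6 entry: fall back to first
    println!("override selection ok");
}
```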
@@ -70,7 +70,7 @@
     let digest = &handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN];
     let digest_half = &digest[..tls::TLS_DIGEST_HALF_LEN];

-    if replay_checker.check_tls_digest(digest_half) {
+    if replay_checker.check_and_add_tls_digest(digest_half) {
         warn!(peer = %peer, "TLS replay attack detected (duplicate digest)");
         return HandshakeResult::BadClient { reader, writer };
     }
@@ -122,8 +122,6 @@
         return HandshakeResult::Error(ProxyError::Io(e));
     }

-    replay_checker.add_tls_digest(digest_half);
-
     info!(
         peer = %peer,
         user = %validation.user,
@@ -155,7 +153,7 @@

     let dec_prekey_iv = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];

-    if replay_checker.check_handshake(dec_prekey_iv) {
+    if replay_checker.check_and_add_handshake(dec_prekey_iv) {
         warn!(peer = %peer, "MTProto replay attack detected");
         return HandshakeResult::BadClient { reader, writer };
     }
@@ -216,8 +214,6 @@

     let enc_iv = u128::from_be_bytes(enc_iv_bytes.try_into().unwrap());

-    replay_checker.add_handshake(dec_prekey_iv);
-
     let encryptor = AesCtr::new(&enc_key, enc_iv);

     let success = HandshakeSuccess {
@@ -256,8 +252,10 @@
 pub fn generate_tg_nonce(
     proto_tag: ProtoTag,
     dc_idx: i16,
-    client_dec_key: &[u8; 32],
-    client_dec_iv: u128,
+    _client_dec_key: &[u8; 32],
+    _client_dec_iv: u128,
+    client_enc_key: &[u8; 32],
+    client_enc_iv: u128,
     rng: &SecureRandom,
     fast_mode: bool,
 ) -> ([u8; HANDSHAKE_LEN], [u8; 32], u128, [u8; 32], u128) {
@@ -278,9 +276,11 @@ pub fn generate_tg_nonce(
     nonce[DC_IDX_POS..DC_IDX_POS + 2].copy_from_slice(&dc_idx.to_le_bytes());

     if fast_mode {
-        nonce[SKIP_LEN..SKIP_LEN + KEY_LEN].copy_from_slice(client_dec_key);
-        nonce[SKIP_LEN + KEY_LEN..SKIP_LEN + KEY_LEN + IV_LEN]
-            .copy_from_slice(&client_dec_iv.to_be_bytes());
+        let mut key_iv = Vec::with_capacity(KEY_LEN + IV_LEN);
+        key_iv.extend_from_slice(client_enc_key);
+        key_iv.extend_from_slice(&client_enc_iv.to_be_bytes());
+        key_iv.reverse(); // Python/C behavior: reversed enc_key+enc_iv in nonce
+        nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN].copy_from_slice(&key_iv);
     }

     let enc_key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
@@ -332,10 +332,21 @@ mod tests {
     fn test_generate_tg_nonce() {
         let client_dec_key = [0x42u8; 32];
         let client_dec_iv = 12345u128;
+        let client_enc_key = [0x24u8; 32];
+        let client_enc_iv = 54321u128;
+
         let rng = SecureRandom::new();
         let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) =
-            generate_tg_nonce(ProtoTag::Secure, 2, &client_dec_key, client_dec_iv, &rng, false);
+            generate_tg_nonce(
+                ProtoTag::Secure,
+                2,
+                &client_dec_key,
+                client_dec_iv,
+                &client_enc_key,
+                client_enc_iv,
+                &rng,
+                false,
+            );

         assert_eq!(nonce.len(), HANDSHAKE_LEN);

@@ -347,10 +358,21 @@ mod tests {
     fn test_encrypt_tg_nonce() {
         let client_dec_key = [0x42u8; 32];
         let client_dec_iv = 12345u128;
+        let client_enc_key = [0x24u8; 32];
+        let client_enc_iv = 54321u128;
+
         let rng = SecureRandom::new();
         let (nonce, _, _, _, _) =
-            generate_tg_nonce(ProtoTag::Secure, 2, &client_dec_key, client_dec_iv, &rng, false);
+            generate_tg_nonce(
+                ProtoTag::Secure,
+                2,
+                &client_dec_key,
+                client_dec_iv,
+                &client_enc_key,
+                client_enc_iv,
+                &rng,
+                false,
+            );

         let encrypted = encrypt_tg_nonce(&nonce);

@@ -379,4 +401,4 @@ mod tests {
         drop(success);
         // Drop impl zeroizes key material without panic
     }
 }
@@ -5,6 +5,7 @@ use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use tracing::{debug, info, trace};

 use crate::config::ProxyConfig;
+use crate::crypto::SecureRandom;
 use crate::error::{ProxyError, Result};
 use crate::protocol::constants::*;
 use crate::proxy::handshake::HandshakeSuccess;
@@ -21,6 +22,7 @@ pub(crate) async fn handle_via_middle_proxy<R, W>(
     _config: Arc<ProxyConfig>,
     _buffer_pool: Arc<BufferPool>,
     local_addr: SocketAddr,
+    rng: Arc<SecureRandom>,
 ) -> Result<()>
 where
     R: AsyncRead + Unpin + Send + 'static,
@@ -58,16 +60,23 @@
         tokio::select! {
             client_frame = read_client_payload(&mut crypto_reader, proto_tag) => {
                 match client_frame {
-                    Ok(Some(payload)) => {
+                    Ok(Some((payload, quickack))) => {
                         trace!(conn_id, bytes = payload.len(), "C->ME frame");
                         stats.add_user_octets_from(&user, payload.len() as u64);
+                        let mut flags = proto_flags;
+                        if quickack {
+                            flags |= RPC_FLAG_QUICKACK;
+                        }
+                        if payload.len() >= 8 && payload[..8].iter().all(|b| *b == 0) {
+                            flags |= RPC_FLAG_NOT_ENCRYPTED;
+                        }
                         me_pool.send_proxy_req(
                             conn_id,
                             success.dc_idx,
                             peer,
                             translated_local_addr,
                             &payload,
-                            proto_flags,
+                            flags,
                         ).await?;
                     }
                     Ok(None) => {
@@ -83,7 +92,7 @@
                     Some(MeResponse::Data { flags, data }) => {
                         trace!(conn_id, bytes = data.len(), flags, "ME->C data");
                         stats.add_user_octets_to(&user, data.len() as u64);
-                        write_client_payload(&mut crypto_writer, proto_tag, flags, &data).await?;
+                        write_client_payload(&mut crypto_writer, proto_tag, flags, &data, rng.as_ref()).await?;
                     }
                     Some(MeResponse::Ack(confirm)) => {
                         trace!(conn_id, confirm, "ME->C quickack");
@@ -111,11 +120,11 @@
 async fn read_client_payload<R>(
     client_reader: &mut CryptoReader<R>,
     proto_tag: ProtoTag,
-) -> Result<Option<Vec<u8>>>
+) -> Result<Option<(Vec<u8>, bool)>>
 where
     R: AsyncRead + Unpin + Send + 'static,
 {
-    let len = match proto_tag {
+    let (len, quickack) = match proto_tag {
         ProtoTag::Abridged => {
             let mut first = [0u8; 1];
             match client_reader.read_exact(&mut first).await {
@@ -124,6 +133,7 @@
                 Err(e) => return Err(ProxyError::Io(e)),
             }

+            let quickack = (first[0] & 0x80) != 0;
             let len_words = if (first[0] & 0x7f) == 0x7f {
                 let mut ext = [0u8; 3];
                 client_reader
@@ -135,9 +145,10 @@
                 (first[0] & 0x7f) as usize
             };

-            len_words
+            let len = len_words
                 .checked_mul(4)
-                .ok_or_else(|| ProxyError::Proxy("Abridged frame length overflow".into()))?
+                .ok_or_else(|| ProxyError::Proxy("Abridged frame length overflow".into()))?;
+            (len, quickack)
         }
         ProtoTag::Intermediate | ProtoTag::Secure => {
             let mut len_buf = [0u8; 4];
@@ -146,7 +157,8 @@
                 Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
                 Err(e) => return Err(ProxyError::Io(e)),
             }
-            (u32::from_le_bytes(len_buf) & 0x7fff_ffff) as usize
+            let quickack = (len_buf[3] & 0x80) != 0;
+            ((u32::from_le_bytes(len_buf) & 0x7fff_ffff) as usize, quickack)
         }
     };

@@ -159,7 +171,15 @@
         .read_exact(&mut payload)
         .await
         .map_err(ProxyError::Io)?;
-    Ok(Some(payload))
+
+    // Secure Intermediate: remove random padding (last len%4 bytes)
+    if proto_tag == ProtoTag::Secure {
+        let rem = len % 4;
+        if rem != 0 && payload.len() >= rem {
+            payload.truncate(len - rem);
+        }
+    }
+    Ok(Some((payload, quickack)))
 }

 async fn write_client_payload<W>(
@@ -167,6 +187,7 @@ async fn write_client_payload<W>(
     proto_tag: ProtoTag,
     flags: u32,
     data: &[u8],
+    rng: &SecureRandom,
 ) -> Result<()>
 where
     W: AsyncWrite + Unpin + Send + 'static,
@@ -215,7 +236,12 @@
                 .map_err(ProxyError::Io)?;
         }
         ProtoTag::Intermediate | ProtoTag::Secure => {
-            let mut len = data.len() as u32;
+            let padding_len = if proto_tag == ProtoTag::Secure {
+                (rng.bytes(1)[0] % 4) as usize
+            } else {
+                0
+            };
+            let mut len = (data.len() + padding_len) as u32;
             if quickack {
                 len |= 0x8000_0000;
             }
@@ -227,10 +253,24 @@
                 .write_all(data)
                 .await
                 .map_err(ProxyError::Io)?;
+            if padding_len > 0 {
+                let pad = rng.bytes(padding_len);
+                client_writer
+                    .write_all(&pad)
+                    .await
+                    .map_err(ProxyError::Io)?;
+            }
         }
     }

-    client_writer.flush().await.map_err(ProxyError::Io)
+    // Avoid unconditional per-frame flush (throughput killer on large downloads).
+    // Flush only when low-latency ack semantics are requested or when
+    // CryptoWriter has buffered pending ciphertext that must be drained.
+    if quickack || client_writer.has_pending() {
+        client_writer.flush().await.map_err(ProxyError::Io)?;
+    }
+
+    Ok(())
 }

 async fn write_client_ack<W>(
@@ -250,5 +290,6 @@
         .write_all(&bytes)
         .await
         .map_err(ProxyError::Io)?;
+    // ACK should remain low-latency.
     client_writer.flush().await.map_err(ProxyError::Io)
 }
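Reviewer note: the write path above pads Secure (padded-intermediate) frames with 0 to 3 random bytes and encodes the padded length, while the read path strips the last `len % 4` bytes. This only round-trips because MTProto payloads are 4-byte aligned, which is the assumption behind the `len % 4` recovery. A small check of the length bookkeeping, detached from the crypto streams:

```rust
fn main() {
    let payload = vec![0xAAu8; 12]; // MTProto payloads are multiples of 4 bytes
    let padding_len = 3usize;       // in the real code: (rng.bytes(1)[0] % 4) as usize

    // Writer side: the length field covers payload + padding.
    let wire_len = (payload.len() + padding_len) as u32;
    let mut frame = wire_len.to_le_bytes().to_vec();
    frame.extend_from_slice(&payload);
    frame.extend_from_slice(&[0u8; 3]); // random padding bytes in the real code

    // Reader side: read the declared length, then drop the last len % 4 bytes.
    let len = (u32::from_le_bytes(frame[..4].try_into().unwrap()) & 0x7fff_ffff) as usize;
    let mut body = frame[4..4 + len].to_vec();
    let rem = len % 4;
    if rem != 0 && body.len() >= rem {
        body.truncate(len - rem);
    }
    assert_eq!(body, payload);
    println!("padded-intermediate length round-trip ok");
}
```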
@@ -97,6 +97,12 @@ impl Stats {
             .unwrap_or(0)
     }
 
+    pub fn get_handshake_timeouts(&self) -> u64 { self.handshake_timeouts.load(Ordering::Relaxed) }
+
+    pub fn iter_user_stats(&self) -> dashmap::iter::Iter<'_, String, UserStats> {
+        self.user_stats.iter()
+    }
+
     pub fn uptime_secs(&self) -> f64 {
         self.start_time.read()
             .map(|t| t.elapsed().as_secs_f64())
@@ -212,28 +218,41 @@ impl ReplayChecker {
         (hasher.finish() as usize) & self.shard_mask
     }
 
-    fn check(&self, data: &[u8]) -> bool {
+    fn check_and_add_internal(&self, data: &[u8]) -> bool {
         self.checks.fetch_add(1, Ordering::Relaxed);
         let idx = self.get_shard_idx(data);
         let mut shard = self.shards[idx].lock();
-        let found = shard.check(data, Instant::now(), self.window);
+        let now = Instant::now();
+        let found = shard.check(data, now, self.window);
         if found {
             self.hits.fetch_add(1, Ordering::Relaxed);
+        } else {
+            shard.add(data, now, self.window);
+            self.additions.fetch_add(1, Ordering::Relaxed);
         }
         found
     }
 
-    fn add(&self, data: &[u8]) {
+    fn add_only(&self, data: &[u8]) {
         self.additions.fetch_add(1, Ordering::Relaxed);
         let idx = self.get_shard_idx(data);
         let mut shard = self.shards[idx].lock();
        shard.add(data, Instant::now(), self.window);
     }
 
-    pub fn check_handshake(&self, data: &[u8]) -> bool { self.check(data) }
-    pub fn add_handshake(&self, data: &[u8]) { self.add(data) }
-    pub fn check_tls_digest(&self, data: &[u8]) -> bool { self.check(data) }
-    pub fn add_tls_digest(&self, data: &[u8]) { self.add(data) }
+    pub fn check_and_add_handshake(&self, data: &[u8]) -> bool {
+        self.check_and_add_internal(data)
+    }
+
+    pub fn check_and_add_tls_digest(&self, data: &[u8]) -> bool {
+        self.check_and_add_internal(data)
+    }
+
+    // Compatibility helpers (non-atomic split operations) — prefer check_and_add_*.
+    pub fn check_handshake(&self, data: &[u8]) -> bool { self.check_and_add_handshake(data) }
+    pub fn add_handshake(&self, data: &[u8]) { self.add_only(data) }
+    pub fn check_tls_digest(&self, data: &[u8]) -> bool { self.check_and_add_tls_digest(data) }
+    pub fn add_tls_digest(&self, data: &[u8]) { self.add_only(data) }
 
     pub fn stats(&self) -> ReplayStats {
         let mut total_entries = 0;
@@ -326,10 +345,9 @@ mod tests {
     #[test]
     fn test_replay_checker_basic() {
         let checker = ReplayChecker::new(100, Duration::from_secs(60));
-        assert!(!checker.check_handshake(b"test1"));
-        checker.add_handshake(b"test1");
-        assert!(checker.check_handshake(b"test1"));
-        assert!(!checker.check_handshake(b"test2"));
+        assert!(!checker.check_handshake(b"test1")); // first time, inserts
+        assert!(checker.check_handshake(b"test1")); // duplicate
+        assert!(!checker.check_handshake(b"test2")); // new key inserts
     }
 
     #[test]
@@ -343,7 +361,7 @@ mod tests {
     #[test]
     fn test_replay_checker_expiration() {
         let checker = ReplayChecker::new(100, Duration::from_millis(50));
-        checker.add_handshake(b"expire");
+        assert!(!checker.check_handshake(b"expire"));
         assert!(checker.check_handshake(b"expire"));
         std::thread::sleep(Duration::from_millis(100));
         assert!(!checker.check_handshake(b"expire"));
@@ -352,25 +370,25 @@ mod tests {
     #[test]
     fn test_replay_checker_stats() {
         let checker = ReplayChecker::new(100, Duration::from_secs(60));
-        checker.add_handshake(b"k1");
-        checker.add_handshake(b"k2");
-        checker.check_handshake(b"k1");
-        checker.check_handshake(b"k3");
+        assert!(!checker.check_handshake(b"k1"));
+        assert!(!checker.check_handshake(b"k2"));
+        assert!(checker.check_handshake(b"k1"));
+        assert!(!checker.check_handshake(b"k3"));
         let stats = checker.stats();
-        assert_eq!(stats.total_additions, 2);
-        assert_eq!(stats.total_checks, 2);
+        assert_eq!(stats.total_additions, 3);
+        assert_eq!(stats.total_checks, 4);
         assert_eq!(stats.total_hits, 1);
     }
 
     #[test]
     fn test_replay_checker_many_keys() {
-        let checker = ReplayChecker::new(1000, Duration::from_secs(60));
+        let checker = ReplayChecker::new(10_000, Duration::from_secs(60));
         for i in 0..500u32 {
-            checker.add(&i.to_le_bytes());
+            checker.add_only(&i.to_le_bytes());
         }
         for i in 0..500u32 {
-            assert!(checker.check(&i.to_le_bytes()));
+            assert!(checker.check_handshake(&i.to_le_bytes()));
         }
         assert_eq!(checker.stats().total_entries, 500);
     }
 }
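The core of this change is that the replay lookup and the insertion now happen under one shard lock (check_and_add_internal), so two concurrent connections replaying the same handshake bytes can no longer both slip through the old split check()/add() sequence. A minimal sketch of the same idea over a single Mutex<HashSet>, with hypothetical names rather than the crate's sharded, time-windowed types:

use std::collections::HashSet;
use std::sync::Mutex;

// Minimal sketch of an atomic "seen before?" check: lookup and insert happen
// under one lock, mirroring check_and_add_internal in the diff above.
struct TinyReplayChecker {
    seen: Mutex<HashSet<Vec<u8>>>,
}

impl TinyReplayChecker {
    fn new() -> Self {
        Self { seen: Mutex::new(HashSet::new()) }
    }

    /// Returns true if `data` was already seen; inserts it otherwise.
    fn check_and_add(&self, data: &[u8]) -> bool {
        let mut seen = self.seen.lock().unwrap();
        if seen.contains(data) {
            true
        } else {
            seen.insert(data.to_vec()); // still holding the lock: no TOCTOU window
            false
        }
    }
}

fn main() {
    let checker = TinyReplayChecker::new();
    assert!(!checker.check_and_add(b"nonce-1")); // first sight
    assert!(checker.check_and_add(b"nonce-1"));  // replay detected
}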
@@ -381,9 +381,14 @@ mod tests {
         // Add a buffer to pool
         pool.preallocate(1);
 
-        // Now try_get should succeed
-        assert!(pool.try_get().is_some());
+        // Now try_get should succeed once while the buffer is held
+        let buf = pool.try_get();
+        assert!(buf.is_some());
+        // While buffer is held, pool is empty
         assert!(pool.try_get().is_none());
+        // Drop buffer -> returns to pool, should be obtainable again
+        drop(buf);
+        assert!(pool.try_get().is_some());
     }
 
     #[test]
@@ -448,4 +453,4 @@ mod tests {
         // All buffers should be returned
         assert!(stats.pooled > 0);
     }
 }
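The rewritten test depends on the pooled buffer returning to the pool when its guard is dropped. A small sketch of that RAII pattern with hypothetical Pool and PooledBuf types (the crate's BufferPool is more elaborate):

use std::sync::{Arc, Mutex};

// Sketch of the drop-returns-to-pool behaviour the updated test exercises.
struct Pool {
    free: Mutex<Vec<Vec<u8>>>,
}

struct PooledBuf {
    buf: Option<Vec<u8>>,
    pool: Arc<Pool>,
}

impl Pool {
    fn with_buffers(n: usize, cap: usize) -> Arc<Self> {
        Arc::new(Self {
            free: Mutex::new((0..n).map(|_| Vec::with_capacity(cap)).collect()),
        })
    }
}

fn try_get(pool: &Arc<Pool>) -> Option<PooledBuf> {
    let buf = pool.free.lock().unwrap().pop()?;
    Some(PooledBuf { buf: Some(buf), pool: Arc::clone(pool) })
}

impl Drop for PooledBuf {
    fn drop(&mut self) {
        if let Some(buf) = self.buf.take() {
            self.pool.free.lock().unwrap().push(buf); // return to pool
        }
    }
}

fn main() {
    let pool = Pool::with_buffers(1, 4096);
    let held = try_get(&pool);
    assert!(held.is_some());
    assert!(try_get(&pool).is_none()); // pool empty while the buffer is held
    drop(held);
    assert!(try_get(&pool).is_some()); // guard's Drop returned it
}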
@@ -32,7 +32,7 @@
 //! and uploads from iOS will break (media/file sending), while small traffic
 //! may still work.
 
-use bytes::{Bytes, BytesMut, BufMut};
+use bytes::{Bytes, BytesMut};
 use std::io::{self, Error, ErrorKind, Result};
 use std::pin::Pin;
 use std::task::{Context, Poll};
@@ -51,9 +51,10 @@ use super::state::{StreamState, HeaderBuffer, YieldBuffer, WriteBuffer};
 /// TLS record header size (type + version + length)
 const TLS_HEADER_SIZE: usize = 5;
 
-/// Maximum TLS fragment size per spec (plaintext fragment).
-/// We use this for *outgoing* chunking, because we build plain ApplicationData records.
-const MAX_TLS_PAYLOAD: usize = 16384;
+/// Maximum TLS fragment size we emit for Application Data.
+/// Real TLS 1.3 ciphertexts often add ~16-24 bytes AEAD overhead, so to mimic
+/// on-the-wire record sizes we allow up to 16384 + 24 bytes of plaintext.
+const MAX_TLS_PAYLOAD: usize = 16384 + 24;
 
 /// Maximum pending write buffer for one record remainder.
 /// Note: we never queue unlimited amount of data here; state holds at most one record.
@@ -918,10 +919,8 @@ mod tests {
         let reader = ChunkedReader::new(&record, 100);
         let mut tls_reader = FakeTlsReader::new(reader);
 
-        let mut buf = vec![0u8; payload.len()];
-        tls_reader.read_exact(&mut buf).await.unwrap();
-
-        assert_eq!(&buf, payload);
+        let buf = tls_reader.read_exact(payload.len()).await.unwrap();
+        assert_eq!(&buf[..], payload);
     }
 
     #[tokio::test]
@@ -935,13 +934,11 @@ mod tests {
         let reader = ChunkedReader::new(&data, 100);
         let mut tls_reader = FakeTlsReader::new(reader);
 
-        let mut buf1 = vec![0u8; payload1.len()];
-        tls_reader.read_exact(&mut buf1).await.unwrap();
-        assert_eq!(&buf1, payload1);
+        let buf1 = tls_reader.read_exact(payload1.len()).await.unwrap();
+        assert_eq!(&buf1[..], payload1);
 
-        let mut buf2 = vec![0u8; payload2.len()];
-        tls_reader.read_exact(&mut buf2).await.unwrap();
-        assert_eq!(&buf2, payload2);
+        let buf2 = tls_reader.read_exact(payload2.len()).await.unwrap();
+        assert_eq!(&buf2[..], payload2);
     }
 
     #[tokio::test]
@@ -953,10 +950,9 @@ mod tests {
         let reader = ChunkedReader::new(&record, 1); // 1 byte at a time!
         let mut tls_reader = FakeTlsReader::new(reader);
 
-        let mut buf = vec![0u8; payload.len()];
-        tls_reader.read_exact(&mut buf).await.unwrap();
+        let buf = tls_reader.read_exact(payload.len()).await.unwrap();
 
-        assert_eq!(&buf, payload);
+        assert_eq!(&buf[..], payload);
     }
 
     #[tokio::test]
@@ -967,10 +963,9 @@ mod tests {
         let reader = ChunkedReader::new(&record, 7); // Awkward chunk size
         let mut tls_reader = FakeTlsReader::new(reader);
 
-        let mut buf = vec![0u8; payload.len()];
-        tls_reader.read_exact(&mut buf).await.unwrap();
+        let buf = tls_reader.read_exact(payload.len()).await.unwrap();
 
-        assert_eq!(&buf, payload);
+        assert_eq!(&buf[..], payload);
     }
 
     #[tokio::test]
@@ -983,10 +978,9 @@ mod tests {
         let reader = ChunkedReader::new(&data, 100);
         let mut tls_reader = FakeTlsReader::new(reader);
 
-        let mut buf = vec![0u8; payload.len()];
-        tls_reader.read_exact(&mut buf).await.unwrap();
+        let buf = tls_reader.read_exact(payload.len()).await.unwrap();
 
-        assert_eq!(&buf, payload);
+        assert_eq!(&buf[..], payload);
     }
 
     #[tokio::test]
@@ -1000,10 +994,9 @@ mod tests {
         let reader = ChunkedReader::new(&data, 3); // Small chunks
         let mut tls_reader = FakeTlsReader::new(reader);
 
-        let mut buf = vec![0u8; payload.len()];
-        tls_reader.read_exact(&mut buf).await.unwrap();
+        let buf = tls_reader.read_exact(payload.len()).await.unwrap();
 
-        assert_eq!(&buf, payload);
+        assert_eq!(&buf[..], payload);
     }
 
     #[tokio::test]
@@ -1244,4 +1237,4 @@ mod tests {
         let bytes = header.to_bytes();
         assert_eq!(bytes, [0x17, 0x03, 0x03, 0x12, 0x34]);
     }
 }
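Raising MAX_TLS_PAYLOAD to 16384 + 24 changes how outgoing data is cut into fake Application Data records. A standalone sketch of that chunking, assuming the 5-byte record header [0x17, 0x03, 0x03, len_be16] that the header test at the end of this diff asserts; the module's real writer streams records instead of collecting them:

// Sketch: split a payload into ApplicationData records no larger than
// MAX_TLS_PAYLOAD, using the header layout [type, ver_hi, ver_lo, len_be16].
const MAX_TLS_PAYLOAD: usize = 16384 + 24;

fn chunk_into_records(payload: &[u8]) -> Vec<Vec<u8>> {
    payload
        .chunks(MAX_TLS_PAYLOAD)
        .map(|chunk| {
            let mut rec = Vec::with_capacity(5 + chunk.len());
            rec.push(0x17); // ApplicationData
            rec.extend_from_slice(&[0x03, 0x03]); // TLS 1.2 record version
            rec.extend_from_slice(&(chunk.len() as u16).to_be_bytes());
            rec.extend_from_slice(chunk);
            rec
        })
        .collect()
}

fn main() {
    let records = chunk_into_records(&vec![0u8; 40_000]);
    assert_eq!(records.len(), 3); // 16408 + 16408 + 7184 bytes of payload
    assert!(records.iter().all(|r| r.len() <= 5 + MAX_TLS_PAYLOAD));
    assert_eq!(&records[0][..3], &[0x17, 0x03, 0x03]);
}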
@@ -1,925 +0,0 @@
|
|||||||
//! Middle Proxy RPC Transport
|
|
||||||
//!
|
|
||||||
//! Implements Telegram Middle-End RPC protocol for routing to ALL DCs (including CDN).
|
|
||||||
//!
|
|
||||||
//! ## Phase 3 fixes:
|
|
||||||
//! - ROOT CAUSE: Use Telegram proxy-secret (binary file) not user secret
|
|
||||||
//! - Streaming handshake response (no fixed-size read deadlock)
|
|
||||||
//! - Health monitoring + reconnection
|
|
||||||
//! - Hex diagnostics for debugging
|
|
||||||
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::sync::atomic::{AtomicU64, Ordering};
|
|
||||||
use std::time::Duration;
|
|
||||||
use bytes::{Bytes, BytesMut};
|
|
||||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
|
||||||
use tokio::net::TcpStream;
|
|
||||||
use tokio::sync::{mpsc, Mutex, RwLock};
|
|
||||||
use tokio::time::{timeout, Instant};
|
|
||||||
use tracing::{debug, info, trace, warn, error};
|
|
||||||
|
|
||||||
use crate::crypto::{crc32, derive_middleproxy_keys, AesCbc, SecureRandom};
|
|
||||||
use crate::error::{ProxyError, Result};
|
|
||||||
use crate::protocol::constants::*;
|
|
||||||
|
|
||||||
// ========== Proxy Secret Fetching ==========
|
|
||||||
|
|
||||||
/// Fetch the Telegram proxy-secret binary file.
|
|
||||||
///
|
|
||||||
/// This is NOT the user secret (-S flag, 16 bytes hex for clients).
|
|
||||||
/// This is the infrastructure secret (--aes-pwd in C MTProxy),
|
|
||||||
/// a binary file of 32-512 bytes used for ME RPC key derivation.
|
|
||||||
///
|
|
||||||
/// Strategy: try local cache, then download from Telegram.
|
|
||||||
pub async fn fetch_proxy_secret(cache_path: Option<&str>) -> Result<Vec<u8>> {
|
|
||||||
let cache = cache_path.unwrap_or("proxy-secret");
|
|
||||||
|
|
||||||
// 1. Try local cache (< 24h old)
|
|
||||||
if let Ok(metadata) = tokio::fs::metadata(cache).await {
|
|
||||||
if let Ok(modified) = metadata.modified() {
|
|
||||||
let age = std::time::SystemTime::now()
|
|
||||||
.duration_since(modified)
|
|
||||||
.unwrap_or(Duration::from_secs(u64::MAX));
|
|
||||||
if age < Duration::from_secs(86400) {
|
|
||||||
if let Ok(data) = tokio::fs::read(cache).await {
|
|
||||||
if data.len() >= 32 {
|
|
||||||
info!(
|
|
||||||
path = cache,
|
|
||||||
len = data.len(),
|
|
||||||
age_hours = age.as_secs() / 3600,
|
|
||||||
"Loaded proxy-secret from cache"
|
|
||||||
);
|
|
||||||
return Ok(data);
|
|
||||||
}
|
|
||||||
warn!(path = cache, len = data.len(), "Cached proxy-secret too short");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Download from Telegram
|
|
||||||
info!("Downloading proxy-secret from core.telegram.org...");
|
|
||||||
let data = download_proxy_secret().await?;
|
|
||||||
|
|
||||||
// 3. Cache locally (best-effort)
|
|
||||||
if let Err(e) = tokio::fs::write(cache, &data).await {
|
|
||||||
warn!(error = %e, "Failed to cache proxy-secret (non-fatal)");
|
|
||||||
} else {
|
|
||||||
debug!(path = cache, len = data.len(), "Cached proxy-secret");
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn download_proxy_secret() -> Result<Vec<u8>> {
|
|
||||||
let url = "https://core.telegram.org/getProxySecret";
|
|
||||||
let resp = reqwest::get(url)
|
|
||||||
.await
|
|
||||||
.map_err(|e| ProxyError::Proxy(format!("Failed to download proxy-secret: {}", e)))?;
|
|
||||||
|
|
||||||
if !resp.status().is_success() {
|
|
||||||
return Err(ProxyError::Proxy(format!(
|
|
||||||
"proxy-secret download HTTP {}", resp.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let data = resp.bytes().await
|
|
||||||
.map_err(|e| ProxyError::Proxy(format!("Read proxy-secret body: {}", e)))?
|
|
||||||
.to_vec();
|
|
||||||
|
|
||||||
if data.len() < 32 {
|
|
||||||
return Err(ProxyError::Proxy(format!(
|
|
||||||
"proxy-secret too short: {} bytes (need >= 32)", data.len()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
info!(len = data.len(), "Downloaded proxy-secret OK");
|
|
||||||
Ok(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== RPC Frame helpers ==========
|
|
||||||
|
|
||||||
/// Build an RPC frame: [len(4) | seq_no(4) | payload | crc32(4)]
|
|
||||||
fn build_rpc_frame(seq_no: i32, payload: &[u8]) -> Vec<u8> {
|
|
||||||
let total_len = (4 + 4 + payload.len() + 4) as u32;
|
|
||||||
let mut f = Vec::with_capacity(total_len as usize);
|
|
||||||
f.extend_from_slice(&total_len.to_le_bytes());
|
|
||||||
f.extend_from_slice(&seq_no.to_le_bytes());
|
|
||||||
f.extend_from_slice(payload);
|
|
||||||
let c = crc32(&f);
|
|
||||||
f.extend_from_slice(&c.to_le_bytes());
|
|
||||||
f
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Read one plaintext RPC frame. Returns (seq_no, payload).
|
|
||||||
async fn read_rpc_frame_plaintext(
|
|
||||||
rd: &mut (impl AsyncReadExt + Unpin),
|
|
||||||
) -> Result<(i32, Vec<u8>)> {
|
|
||||||
let mut len_buf = [0u8; 4];
|
|
||||||
rd.read_exact(&mut len_buf).await.map_err(ProxyError::Io)?;
|
|
||||||
let total_len = u32::from_le_bytes(len_buf) as usize;
|
|
||||||
|
|
||||||
if total_len < 12 || total_len > (1 << 24) {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("Bad RPC frame length: {}", total_len),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut rest = vec![0u8; total_len - 4];
|
|
||||||
rd.read_exact(&mut rest).await.map_err(ProxyError::Io)?;
|
|
||||||
|
|
||||||
let mut full = Vec::with_capacity(total_len);
|
|
||||||
full.extend_from_slice(&len_buf);
|
|
||||||
full.extend_from_slice(&rest);
|
|
||||||
|
|
||||||
let crc_offset = total_len - 4;
|
|
||||||
let expected_crc = u32::from_le_bytes([
|
|
||||||
full[crc_offset], full[crc_offset + 1],
|
|
||||||
full[crc_offset + 2], full[crc_offset + 3],
|
|
||||||
]);
|
|
||||||
let actual_crc = crc32(&full[..crc_offset]);
|
|
||||||
if expected_crc != actual_crc {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("CRC mismatch: 0x{:08x} vs 0x{:08x}", expected_crc, actual_crc),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let seq_no = i32::from_le_bytes([full[4], full[5], full[6], full[7]]);
|
|
||||||
let payload = full[8..crc_offset].to_vec();
|
|
||||||
Ok((seq_no, payload))
|
|
||||||
}
|
|
||||||
|
|
||||||
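The frame layout documented above, [len(4) | seq_no(4) | payload | crc32(4)] with all fields little-endian and the CRC computed over everything before the checksum, is easiest to verify on a worked example. A self-contained sketch; the crc32fast crate here is an assumption standing in for the crate's own crc32 helper (both compute the standard IEEE CRC-32):

// Worked example of the RPC frame layout described above:
// [ total_len(4, LE) | seq_no(4, LE) | payload | crc32(4, LE) ]
fn build_rpc_frame(seq_no: i32, payload: &[u8]) -> Vec<u8> {
    let total_len = (4 + 4 + payload.len() + 4) as u32;
    let mut f = Vec::with_capacity(total_len as usize);
    f.extend_from_slice(&total_len.to_le_bytes());
    f.extend_from_slice(&seq_no.to_le_bytes());
    f.extend_from_slice(payload);
    let crc = crc32fast::hash(&f); // CRC over len + seq + payload
    f.extend_from_slice(&crc.to_le_bytes());
    f
}

fn main() {
    // A 4-byte payload gives total_len = 4 + 4 + 4 + 4 = 16.
    let frame = build_rpc_frame(-2, &[0xAA, 0xBB, 0xCC, 0xDD]);
    assert_eq!(frame.len(), 16);
    assert_eq!(u32::from_le_bytes(frame[0..4].try_into().unwrap()), 16);
    assert_eq!(i32::from_le_bytes(frame[4..8].try_into().unwrap()), -2);
    // Receiver-side check: CRC over frame[..12] must equal the trailing 4 bytes.
    let expected = u32::from_le_bytes(frame[12..16].try_into().unwrap());
    assert_eq!(crc32fast::hash(&frame[..12]), expected);
}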
// ========== RPC Nonce (32 bytes payload) ==========
|
|
||||||
|
|
||||||
fn build_nonce_payload(key_selector: u32, crypto_ts: u32, nonce: &[u8; 16]) -> [u8; 32] {
|
|
||||||
let mut p = [0u8; 32];
|
|
||||||
p[0..4].copy_from_slice(&RPC_NONCE_U32.to_le_bytes());
|
|
||||||
p[4..8].copy_from_slice(&key_selector.to_le_bytes());
|
|
||||||
p[8..12].copy_from_slice(&RPC_CRYPTO_AES_U32.to_le_bytes());
|
|
||||||
p[12..16].copy_from_slice(&crypto_ts.to_le_bytes());
|
|
||||||
p[16..32].copy_from_slice(nonce);
|
|
||||||
p
|
|
||||||
}
|
|
||||||
|
|
||||||
fn parse_nonce_payload(d: &[u8]) -> Result<(u32, u32, [u8; 16])> {
|
|
||||||
if d.len() < 32 {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("Nonce payload too short: {} bytes", d.len()),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let t = u32::from_le_bytes([d[0], d[1], d[2], d[3]]);
|
|
||||||
if t != RPC_NONCE_U32 {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("Expected RPC_NONCE 0x{:08x}, got 0x{:08x}", RPC_NONCE_U32, t),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
let schema = u32::from_le_bytes([d[8], d[9], d[10], d[11]]);
|
|
||||||
let ts = u32::from_le_bytes([d[12], d[13], d[14], d[15]]);
|
|
||||||
let mut nonce = [0u8; 16];
|
|
||||||
nonce.copy_from_slice(&d[16..32]);
|
|
||||||
Ok((schema, ts, nonce))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== RPC Handshake (32 bytes payload) ==========
|
|
||||||
|
|
||||||
fn build_handshake_payload(our_ip: u32, our_port: u16, peer_ip: u32, peer_port: u16) -> [u8; 32] {
|
|
||||||
let mut p = [0u8; 32];
|
|
||||||
p[0..4].copy_from_slice(&RPC_HANDSHAKE_U32.to_le_bytes());
|
|
||||||
// flags = 0 at offset 4..8
|
|
||||||
|
|
||||||
// sender_pid: {ip(4), port(2), pid(2), utime(4)} at offset 8..20
|
|
||||||
p[8..12].copy_from_slice(&our_ip.to_le_bytes());
|
|
||||||
p[12..14].copy_from_slice(&our_port.to_le_bytes());
|
|
||||||
let pid = (std::process::id() & 0xFFFF) as u16;
|
|
||||||
p[14..16].copy_from_slice(&pid.to_le_bytes());
|
|
||||||
let utime = std::time::SystemTime::now()
|
|
||||||
.duration_since(std::time::UNIX_EPOCH)
|
|
||||||
.unwrap_or_default()
|
|
||||||
.as_secs() as u32;
|
|
||||||
p[16..20].copy_from_slice(&utime.to_le_bytes());
|
|
||||||
|
|
||||||
// peer_pid: {ip(4), port(2), pid(2), utime(4)} at offset 20..32
|
|
||||||
p[20..24].copy_from_slice(&peer_ip.to_le_bytes());
|
|
||||||
p[24..26].copy_from_slice(&peer_port.to_le_bytes());
|
|
||||||
p
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== CBC helpers ==========
|
|
||||||
|
|
||||||
fn cbc_encrypt_padded(key: &[u8; 32], iv: &[u8; 16], plaintext: &[u8]) -> Result<(Vec<u8>, [u8; 16])> {
|
|
||||||
let pad = (16 - (plaintext.len() % 16)) % 16;
|
|
||||||
let mut buf = plaintext.to_vec();
|
|
||||||
let pad_pattern: [u8; 4] = [0x04, 0x00, 0x00, 0x00];
|
|
||||||
for i in 0..pad {
|
|
||||||
buf.push(pad_pattern[i % 4]);
|
|
||||||
}
|
|
||||||
let cipher = AesCbc::new(*key, *iv);
|
|
||||||
cipher.encrypt_in_place(&mut buf)
|
|
||||||
.map_err(|e| ProxyError::Crypto(format!("CBC encrypt: {}", e)))?;
|
|
||||||
let mut new_iv = [0u8; 16];
|
|
||||||
if buf.len() >= 16 {
|
|
||||||
new_iv.copy_from_slice(&buf[buf.len() - 16..]);
|
|
||||||
}
|
|
||||||
Ok((buf, new_iv))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cbc_decrypt_inplace(key: &[u8; 32], iv: &[u8; 16], data: &mut [u8]) -> Result<[u8; 16]> {
|
|
||||||
let mut new_iv = [0u8; 16];
|
|
||||||
if data.len() >= 16 {
|
|
||||||
new_iv.copy_from_slice(&data[data.len() - 16..]);
|
|
||||||
}
|
|
||||||
AesCbc::new(*key, *iv)
|
|
||||||
.decrypt_in_place(data)
|
|
||||||
.map_err(|e| ProxyError::Crypto(format!("CBC decrypt: {}", e)))?;
|
|
||||||
Ok(new_iv)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== IPv4 helpers ==========
|
|
||||||
|
|
||||||
fn ipv4_to_mapped_v6(ip: Ipv4Addr) -> [u8; 16] {
|
|
||||||
let mut buf = [0u8; 16];
|
|
||||||
buf[10] = 0xFF;
|
|
||||||
buf[11] = 0xFF;
|
|
||||||
let o = ip.octets();
|
|
||||||
buf[12] = o[0]; buf[13] = o[1]; buf[14] = o[2]; buf[15] = o[3];
|
|
||||||
buf
|
|
||||||
}
|
|
||||||
|
|
||||||
fn addr_to_ip_u32(addr: &SocketAddr) -> u32 {
|
|
||||||
match addr.ip() {
|
|
||||||
IpAddr::V4(v4) => u32::from_be_bytes(v4.octets()),
|
|
||||||
IpAddr::V6(v6) => {
|
|
||||||
if let Some(v4) = v6.to_ipv4_mapped() {
|
|
||||||
u32::from_be_bytes(v4.octets())
|
|
||||||
} else { 0 }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== ME Response ==========
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum MeResponse {
|
|
||||||
Data(Bytes),
|
|
||||||
Ack(u32),
|
|
||||||
Close,
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== Connection Registry ==========
|
|
||||||
|
|
||||||
pub struct ConnRegistry {
|
|
||||||
map: RwLock<HashMap<u64, mpsc::Sender<MeResponse>>>,
|
|
||||||
next_id: AtomicU64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConnRegistry {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
map: RwLock::new(HashMap::new()),
|
|
||||||
next_id: AtomicU64::new(1),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub async fn register(&self) -> (u64, mpsc::Receiver<MeResponse>) {
|
|
||||||
let id = self.next_id.fetch_add(1, Ordering::Relaxed);
|
|
||||||
let (tx, rx) = mpsc::channel(256);
|
|
||||||
self.map.write().await.insert(id, tx);
|
|
||||||
(id, rx)
|
|
||||||
}
|
|
||||||
pub async fn unregister(&self, id: u64) {
|
|
||||||
self.map.write().await.remove(&id);
|
|
||||||
}
|
|
||||||
pub async fn route(&self, id: u64, resp: MeResponse) -> bool {
|
|
||||||
let m = self.map.read().await;
|
|
||||||
if let Some(tx) = m.get(&id) {
|
|
||||||
tx.send(resp).await.is_ok()
|
|
||||||
} else { false }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== RPC Writer (streaming CBC) ==========
|
|
||||||
|
|
||||||
struct RpcWriter {
|
|
||||||
writer: tokio::io::WriteHalf<TcpStream>,
|
|
||||||
key: [u8; 32],
|
|
||||||
iv: [u8; 16],
|
|
||||||
seq_no: i32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RpcWriter {
|
|
||||||
async fn send(&mut self, payload: &[u8]) -> Result<()> {
|
|
||||||
let frame = build_rpc_frame(self.seq_no, payload);
|
|
||||||
self.seq_no += 1;
|
|
||||||
|
|
||||||
let pad = (16 - (frame.len() % 16)) % 16;
|
|
||||||
let mut buf = frame;
|
|
||||||
let pad_pattern: [u8; 4] = [0x04, 0x00, 0x00, 0x00];
|
|
||||||
for i in 0..pad {
|
|
||||||
buf.push(pad_pattern[i % 4]);
|
|
||||||
}
|
|
||||||
|
|
||||||
let cipher = AesCbc::new(self.key, self.iv);
|
|
||||||
cipher.encrypt_in_place(&mut buf)
|
|
||||||
.map_err(|e| ProxyError::Crypto(format!("{}", e)))?;
|
|
||||||
|
|
||||||
if buf.len() >= 16 {
|
|
||||||
self.iv.copy_from_slice(&buf[buf.len() - 16..]);
|
|
||||||
}
|
|
||||||
self.writer.write_all(&buf).await.map_err(ProxyError::Io)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== RPC_PROXY_REQ ==========
|
|
||||||
|
|
||||||
|
|
||||||
fn build_proxy_req_payload(
|
|
||||||
conn_id: u64,
|
|
||||||
client_addr: SocketAddr,
|
|
||||||
our_addr: SocketAddr,
|
|
||||||
data: &[u8],
|
|
||||||
proxy_tag: Option<&[u8]>,
|
|
||||||
proto_flags: u32,
|
|
||||||
) -> Vec<u8> {
|
|
||||||
// flags are pre-calculated by proto_flags_for_tag
|
|
||||||
// We just need to ensure FLAG_HAS_AD_TAG is set if we have a tag (it is set by default in our new function, but let's be safe)
|
|
||||||
let mut flags = proto_flags;
|
|
||||||
|
|
||||||
// The C code logic:
|
|
||||||
// flags = (transport_flags) | 0x1000 | 0x20000 | 0x8 (if tag)
|
|
||||||
// Our proto_flags_for_tag returns: 0x8 | 0x1000 | 0x20000 | transport_flags
|
|
||||||
// So we are good.
|
|
||||||
|
|
||||||
let b_cap = 128 + data.len();
|
|
||||||
let mut b = Vec::with_capacity(b_cap);
|
|
||||||
|
|
||||||
b.extend_from_slice(&RPC_PROXY_REQ_U32.to_le_bytes());
|
|
||||||
b.extend_from_slice(&flags.to_le_bytes());
|
|
||||||
b.extend_from_slice(&conn_id.to_le_bytes());
|
|
||||||
|
|
||||||
// Client IP (16 bytes IPv4-mapped-v6) + port (4 bytes)
|
|
||||||
match client_addr.ip() {
|
|
||||||
IpAddr::V4(v4) => b.extend_from_slice(&ipv4_to_mapped_v6(v4)),
|
|
||||||
IpAddr::V6(v6) => b.extend_from_slice(&v6.octets()),
|
|
||||||
}
|
|
||||||
b.extend_from_slice(&(client_addr.port() as u32).to_le_bytes());
|
|
||||||
|
|
||||||
// Our IP (16 bytes) + port (4 bytes)
|
|
||||||
match our_addr.ip() {
|
|
||||||
IpAddr::V4(v4) => b.extend_from_slice(&ipv4_to_mapped_v6(v4)),
|
|
||||||
IpAddr::V6(v6) => b.extend_from_slice(&v6.octets()),
|
|
||||||
}
|
|
||||||
b.extend_from_slice(&(our_addr.port() as u32).to_le_bytes());
|
|
||||||
|
|
||||||
// Extra section (proxy_tag)
|
|
||||||
if flags & 12 != 0 {
|
|
||||||
let extra_start = b.len();
|
|
||||||
b.extend_from_slice(&0u32.to_le_bytes()); // placeholder
|
|
||||||
|
|
||||||
if let Some(tag) = proxy_tag {
|
|
||||||
b.extend_from_slice(&TL_PROXY_TAG_U32.to_le_bytes());
|
|
||||||
// TL string encoding
|
|
||||||
if tag.len() < 254 {
|
|
||||||
b.push(tag.len() as u8);
|
|
||||||
b.extend_from_slice(tag);
|
|
||||||
let pad = (4 - ((1 + tag.len()) % 4)) % 4;
|
|
||||||
b.extend(std::iter::repeat(0u8).take(pad));
|
|
||||||
} else {
|
|
||||||
b.push(0xfe);
|
|
||||||
let len_bytes = (tag.len() as u32).to_le_bytes();
|
|
||||||
b.extend_from_slice(&len_bytes[..3]);
|
|
||||||
b.extend_from_slice(tag);
|
|
||||||
let pad = (4 - (tag.len() % 4)) % 4;
|
|
||||||
b.extend(std::iter::repeat(0u8).take(pad));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let extra_bytes = (b.len() - extra_start - 4) as u32;
|
|
||||||
let eb = extra_bytes.to_le_bytes();
|
|
||||||
b[extra_start..extra_start + 4].copy_from_slice(&eb);
|
|
||||||
}
|
|
||||||
|
|
||||||
b.extend_from_slice(data);
|
|
||||||
b
|
|
||||||
}
|
|
||||||
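The extra-section code above inlines Telegram's TL byte-string encoding: short form for lengths below 254 (one length byte, then data, zero-padded to a 4-byte boundary) and long form otherwise (a 0xfe marker plus a 3-byte little-endian length). A standalone sketch of just that encoding, as a hypothetical helper rather than part of the module:

// Sketch of the TL string encoding used for the proxy_tag above.
// Short form (< 254 bytes): [len u8 | data | pad to 4-byte boundary]
// Long form  (>= 254):      [0xfe | len as 3 LE bytes | data | pad to 4]
fn tl_encode_bytes(data: &[u8]) -> Vec<u8> {
    let mut out = Vec::new();
    if data.len() < 254 {
        out.push(data.len() as u8);
        out.extend_from_slice(data);
        let pad = (4 - ((1 + data.len()) % 4)) % 4;
        out.extend(std::iter::repeat(0u8).take(pad));
    } else {
        out.push(0xfe);
        out.extend_from_slice(&(data.len() as u32).to_le_bytes()[..3]);
        out.extend_from_slice(data);
        let pad = (4 - (data.len() % 4)) % 4;
        out.extend(std::iter::repeat(0u8).take(pad));
    }
    out
}

fn main() {
    // 16-byte ad tag: 1 length byte + 16 data bytes + 3 padding bytes = 20.
    assert_eq!(tl_encode_bytes(&[0x42; 16]).len(), 20);
    // 300-byte value: 4 header bytes + 300 data bytes + 0 padding = 304.
    assert_eq!(tl_encode_bytes(&[0u8; 300]).len(), 304);
}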
|
|
||||||
// ========== ME Pool ==========
|
|
||||||
|
|
||||||
pub struct MePool {
|
|
||||||
registry: Arc<ConnRegistry>,
|
|
||||||
writers: Arc<RwLock<Vec<Arc<Mutex<RpcWriter>>>>>,
|
|
||||||
rr: AtomicU64,
|
|
||||||
proxy_tag: Option<Vec<u8>>,
|
|
||||||
/// Telegram proxy-secret (binary, 32-512 bytes)
|
|
||||||
proxy_secret: Vec<u8>,
|
|
||||||
pool_size: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MePool {
|
|
||||||
pub fn new(proxy_tag: Option<Vec<u8>>, proxy_secret: Vec<u8>) -> Arc<Self> {
|
|
||||||
Arc::new(Self {
|
|
||||||
registry: Arc::new(ConnRegistry::new()),
|
|
||||||
writers: Arc::new(RwLock::new(Vec::new())),
|
|
||||||
rr: AtomicU64::new(0),
|
|
||||||
proxy_tag,
|
|
||||||
proxy_secret,
|
|
||||||
pool_size: 2,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn registry(&self) -> &Arc<ConnRegistry> {
|
|
||||||
&self.registry
|
|
||||||
}
|
|
||||||
|
|
||||||
fn writers_arc(&self) -> Arc<RwLock<Vec<Arc<Mutex<RpcWriter>>>>> {
|
|
||||||
self.writers.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// key_selector = first 4 bytes of proxy-secret as LE u32
|
|
||||||
/// C: main_secret.key_signature via union { char secret[]; int key_signature; }
|
|
||||||
fn key_selector(&self) -> u32 {
|
|
||||||
if self.proxy_secret.len() >= 4 {
|
|
||||||
u32::from_le_bytes([
|
|
||||||
self.proxy_secret[0], self.proxy_secret[1],
|
|
||||||
self.proxy_secret[2], self.proxy_secret[3],
|
|
||||||
])
|
|
||||||
} else { 0 }
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn init(
|
|
||||||
self: &Arc<Self>,
|
|
||||||
pool_size: usize,
|
|
||||||
rng: &SecureRandom,
|
|
||||||
) -> Result<()> {
|
|
||||||
let addrs = &*TG_MIDDLE_PROXIES_FLAT_V4;
|
|
||||||
let ks = self.key_selector();
|
|
||||||
info!(
|
|
||||||
me_servers = addrs.len(),
|
|
||||||
pool_size,
|
|
||||||
key_selector = format_args!("0x{:08x}", ks),
|
|
||||||
secret_len = self.proxy_secret.len(),
|
|
||||||
"Initializing ME pool"
|
|
||||||
);
|
|
||||||
|
|
||||||
for &(ip, port) in addrs.iter() {
|
|
||||||
for i in 0..pool_size {
|
|
||||||
let addr = SocketAddr::new(ip, port);
|
|
||||||
match self.connect_one(addr, rng).await {
|
|
||||||
Ok(()) => info!(%addr, idx = i, "ME connected"),
|
|
||||||
Err(e) => warn!(%addr, idx = i, error = %e, "ME connect failed"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if self.writers.read().await.len() >= pool_size {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.writers.read().await.is_empty() {
|
|
||||||
return Err(ProxyError::Proxy("No ME connections".into()));
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn connect_one(
|
|
||||||
self: &Arc<Self>,
|
|
||||||
addr: SocketAddr,
|
|
||||||
rng: &SecureRandom,
|
|
||||||
) -> Result<()> {
|
|
||||||
let secret = &self.proxy_secret;
|
|
||||||
if secret.len() < 32 {
|
|
||||||
return Err(ProxyError::Proxy("proxy-secret too short for ME auth".into()));
|
|
||||||
}
|
|
||||||
|
|
||||||
// ===== TCP connect =====
|
|
||||||
let stream = timeout(
|
|
||||||
Duration::from_secs(ME_CONNECT_TIMEOUT_SECS),
|
|
||||||
TcpStream::connect(addr),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map_err(|_| ProxyError::ConnectionTimeout { addr: addr.to_string() })?
|
|
||||||
.map_err(ProxyError::Io)?;
|
|
||||||
stream.set_nodelay(true).ok();
|
|
||||||
|
|
||||||
let local_addr = stream.local_addr().map_err(ProxyError::Io)?;
|
|
||||||
let peer_addr = stream.peer_addr().map_err(ProxyError::Io)?;
|
|
||||||
let (mut rd, mut wr) = tokio::io::split(stream);
|
|
||||||
|
|
||||||
// ===== 1. Send RPC nonce (plaintext, seq=-2) =====
|
|
||||||
let my_nonce: [u8; 16] = rng.bytes(16).try_into().unwrap();
|
|
||||||
let crypto_ts = std::time::SystemTime::now()
|
|
||||||
.duration_since(std::time::UNIX_EPOCH)
|
|
||||||
.unwrap_or_default()
|
|
||||||
.as_secs() as u32;
|
|
||||||
let ks = self.key_selector();
|
|
||||||
|
|
||||||
let nonce_payload = build_nonce_payload(ks, crypto_ts, &my_nonce);
|
|
||||||
let nonce_frame = build_rpc_frame(-2, &nonce_payload);
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
%addr,
|
|
||||||
frame_len = nonce_frame.len(),
|
|
||||||
key_sel = format_args!("0x{:08x}", ks),
|
|
||||||
crypto_ts,
|
|
||||||
"Sending nonce"
|
|
||||||
);
|
|
||||||
|
|
||||||
wr.write_all(&nonce_frame).await.map_err(ProxyError::Io)?;
|
|
||||||
wr.flush().await.map_err(ProxyError::Io)?;
|
|
||||||
|
|
||||||
// ===== 2. Read server nonce (plaintext, seq=-2) =====
|
|
||||||
let (srv_seq, srv_nonce_payload) = timeout(
|
|
||||||
Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS),
|
|
||||||
read_rpc_frame_plaintext(&mut rd),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map_err(|_| ProxyError::TgHandshakeTimeout)??;
|
|
||||||
|
|
||||||
if srv_seq != -2 {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("Expected seq=-2, got {}", srv_seq),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let (schema, _srv_ts, srv_nonce) = parse_nonce_payload(&srv_nonce_payload)?;
|
|
||||||
if schema != RPC_CRYPTO_AES_U32 {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("Unsupported crypto schema: 0x{:x}", schema),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!(%addr, "Nonce exchange OK, deriving keys");
|
|
||||||
|
|
||||||
// ===== 3. Derive AES-256-CBC keys =====
|
|
||||||
// C buffer layout:
|
|
||||||
// [0..16] nonce_server (srv_nonce)
|
|
||||||
// [16..32] nonce_client (my_nonce)
|
|
||||||
// [32..36] client_timestamp
|
|
||||||
// [36..40] server_ip
|
|
||||||
// [40..42] client_port
|
|
||||||
// [42..48] "CLIENT" or "SERVER"
|
|
||||||
// [48..52] client_ip
|
|
||||||
// [52..54] server_port
|
|
||||||
// [54..54+N] secret (proxy-secret binary)
|
|
||||||
// [54+N..70+N] nonce_server
|
|
||||||
// nonce_client(16)
|
|
||||||
|
|
||||||
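The comment block above pins down the byte layout the C implementation feeds into its key derivation. A sketch that assembles that buffer literally, to make the offsets concrete; only the layout is shown, the hashing and key/IV split stay inside derive_middleproxy_keys, and the helper name is hypothetical:

// Assemble the key-derivation input exactly as the C layout above describes.
fn build_kdf_buffer(
    nonce_server: &[u8; 16],
    nonce_client: &[u8; 16],
    client_ts: u32,
    server_ip: u32,
    client_port: u16,
    purpose: &[u8; 6], // b"CLIENT" or b"SERVER"
    client_ip: u32,
    server_port: u16,
    proxy_secret: &[u8],
) -> Vec<u8> {
    let mut buf = Vec::with_capacity(70 + proxy_secret.len());
    buf.extend_from_slice(nonce_server);               // [0..16]
    buf.extend_from_slice(nonce_client);               // [16..32]
    buf.extend_from_slice(&client_ts.to_le_bytes());   // [32..36]
    buf.extend_from_slice(&server_ip.to_le_bytes());   // [36..40]
    buf.extend_from_slice(&client_port.to_le_bytes()); // [40..42]
    buf.extend_from_slice(purpose);                    // [42..48]
    buf.extend_from_slice(&client_ip.to_le_bytes());   // [48..52]
    buf.extend_from_slice(&server_port.to_le_bytes()); // [52..54]
    buf.extend_from_slice(proxy_secret);               // [54..54+N]
    buf.extend_from_slice(nonce_server);               // [54+N..70+N]
    buf.extend_from_slice(nonce_client);               // trailing nonce_client(16)
    buf
}

fn main() {
    let buf = build_kdf_buffer(&[1; 16], &[2; 16], 0, 0, 0, b"CLIENT", 0, 0, &[0u8; 32]);
    assert_eq!(buf.len(), 70 + 32 + 16); // fixed prefix + secret + trailing nonce_client
}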
let ts_bytes = crypto_ts.to_le_bytes();
|
|
||||||
let server_ip = addr_to_ip_u32(&peer_addr);
|
|
||||||
let client_ip = addr_to_ip_u32(&local_addr);
|
|
||||||
let server_ip_bytes = server_ip.to_le_bytes();
|
|
||||||
let client_ip_bytes = client_ip.to_le_bytes();
|
|
||||||
let server_port_bytes = peer_addr.port().to_le_bytes();
|
|
||||||
let client_port_bytes = local_addr.port().to_le_bytes();
|
|
||||||
|
|
||||||
let (wk, wi) = derive_middleproxy_keys(
|
|
||||||
&srv_nonce, &my_nonce, &ts_bytes,
|
|
||||||
Some(&server_ip_bytes), &client_port_bytes,
|
|
||||||
b"CLIENT",
|
|
||||||
Some(&client_ip_bytes), &server_port_bytes,
|
|
||||||
secret, None, None,
|
|
||||||
);
|
|
||||||
let (rk, ri) = derive_middleproxy_keys(
|
|
||||||
&srv_nonce, &my_nonce, &ts_bytes,
|
|
||||||
Some(&server_ip_bytes), &client_port_bytes,
|
|
||||||
b"SERVER",
|
|
||||||
Some(&client_ip_bytes), &server_port_bytes,
|
|
||||||
secret, None, None,
|
|
||||||
);
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
%addr,
|
|
||||||
write_key = %hex::encode(&wk[..8]),
|
|
||||||
read_key = %hex::encode(&rk[..8]),
|
|
||||||
"Keys derived"
|
|
||||||
);
|
|
||||||
|
|
||||||
// ===== 4. Send encrypted handshake (seq=-1) =====
|
|
||||||
let hs_payload = build_handshake_payload(
|
|
||||||
client_ip, local_addr.port(),
|
|
||||||
server_ip, peer_addr.port(),
|
|
||||||
);
|
|
||||||
let hs_frame = build_rpc_frame(-1, &hs_payload);
|
|
||||||
let (encrypted_hs, write_iv) = cbc_encrypt_padded(&wk, &wi, &hs_frame)?;
|
|
||||||
wr.write_all(&encrypted_hs).await.map_err(ProxyError::Io)?;
|
|
||||||
wr.flush().await.map_err(ProxyError::Io)?;
|
|
||||||
|
|
||||||
debug!(%addr, enc_len = encrypted_hs.len(), "Sent encrypted handshake");
|
|
||||||
|
|
||||||
// ===== 5. Read encrypted handshake response (STREAMING) =====
|
|
||||||
// Server sends encrypted handshake. C crypto layer may send partial
|
|
||||||
// blocks (only complete 16-byte blocks get encrypted at a time).
|
|
||||||
// We read incrementally and decrypt block-by-block.
|
|
||||||
let deadline = Instant::now() + Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS);
|
|
||||||
let mut enc_buf = BytesMut::with_capacity(256);
|
|
||||||
let mut dec_buf = BytesMut::with_capacity(256);
|
|
||||||
let mut read_iv = ri;
|
|
||||||
let mut handshake_ok = false;
|
|
||||||
|
|
||||||
while Instant::now() < deadline && !handshake_ok {
|
|
||||||
let remaining = deadline - Instant::now();
|
|
||||||
let mut tmp = [0u8; 256];
|
|
||||||
let n = match timeout(remaining, rd.read(&mut tmp)).await {
|
|
||||||
Ok(Ok(0)) => return Err(ProxyError::Io(std::io::Error::new(
|
|
||||||
std::io::ErrorKind::UnexpectedEof, "ME closed during handshake",
|
|
||||||
))),
|
|
||||||
Ok(Ok(n)) => n,
|
|
||||||
Ok(Err(e)) => return Err(ProxyError::Io(e)),
|
|
||||||
Err(_) => return Err(ProxyError::TgHandshakeTimeout),
|
|
||||||
};
|
|
||||||
enc_buf.extend_from_slice(&tmp[..n]);
|
|
||||||
|
|
||||||
// Decrypt complete 16-byte blocks
|
|
||||||
let blocks = enc_buf.len() / 16 * 16;
|
|
||||||
if blocks > 0 {
|
|
||||||
let mut chunk = vec![0u8; blocks];
|
|
||||||
chunk.copy_from_slice(&enc_buf[..blocks]);
|
|
||||||
let new_iv = cbc_decrypt_inplace(&rk, &read_iv, &mut chunk)?;
|
|
||||||
read_iv = new_iv;
|
|
||||||
dec_buf.extend_from_slice(&chunk);
|
|
||||||
let _ = enc_buf.split_to(blocks);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to parse RPC frame from decrypted data
|
|
||||||
while dec_buf.len() >= 4 {
|
|
||||||
let fl = u32::from_le_bytes([
|
|
||||||
dec_buf[0], dec_buf[1], dec_buf[2], dec_buf[3],
|
|
||||||
]) as usize;
|
|
||||||
|
|
||||||
// Skip noop padding
|
|
||||||
if fl == 4 {
|
|
||||||
let _ = dec_buf.split_to(4);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if fl < 12 || fl > (1 << 24) {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("Bad HS response frame len: {}", fl),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
if dec_buf.len() < fl {
|
|
||||||
break; // need more data
|
|
||||||
}
|
|
||||||
|
|
||||||
let frame = dec_buf.split_to(fl);
|
|
||||||
|
|
||||||
// CRC32 check
|
|
||||||
let pe = fl - 4;
|
|
||||||
let ec = u32::from_le_bytes([
|
|
||||||
frame[pe], frame[pe + 1], frame[pe + 2], frame[pe + 3],
|
|
||||||
]);
|
|
||||||
let ac = crc32(&frame[..pe]);
|
|
||||||
if ec != ac {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("HS CRC mismatch: 0x{:08x} vs 0x{:08x}", ec, ac),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check type
|
|
||||||
let hs_type = u32::from_le_bytes([
|
|
||||||
frame[8], frame[9], frame[10], frame[11],
|
|
||||||
]);
|
|
||||||
if hs_type == RPC_HANDSHAKE_ERROR_U32 {
|
|
||||||
let err_code = if frame.len() >= 16 {
|
|
||||||
i32::from_le_bytes([frame[12], frame[13], frame[14], frame[15]])
|
|
||||||
} else { -1 };
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("ME rejected handshake (error={})", err_code),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
if hs_type != RPC_HANDSHAKE_U32 {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
format!("Expected HANDSHAKE 0x{:08x}, got 0x{:08x}", RPC_HANDSHAKE_U32, hs_type),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
handshake_ok = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !handshake_ok {
|
|
||||||
return Err(ProxyError::TgHandshakeTimeout);
|
|
||||||
}
|
|
||||||
|
|
||||||
info!(%addr, "RPC handshake OK");
|
|
||||||
|
|
||||||
// ===== 6. Setup writer + reader =====
|
|
||||||
let rpc_w = Arc::new(Mutex::new(RpcWriter {
|
|
||||||
writer: wr,
|
|
||||||
key: wk,
|
|
||||||
iv: write_iv,
|
|
||||||
seq_no: 0,
|
|
||||||
}));
|
|
||||||
self.writers.write().await.push(rpc_w.clone());
|
|
||||||
|
|
||||||
let reg = self.registry.clone();
|
|
||||||
let w_pong = rpc_w.clone();
|
|
||||||
let w_pool = self.writers_arc();
|
|
||||||
tokio::spawn(async move {
|
|
||||||
if let Err(e) = reader_loop(rd, rk, read_iv, reg, enc_buf, dec_buf, w_pong.clone()).await {
|
|
||||||
warn!(error = %e, "ME reader ended");
|
|
||||||
}
|
|
||||||
// Remove dead writer from pool
|
|
||||||
let mut ws = w_pool.write().await;
|
|
||||||
ws.retain(|w| !Arc::ptr_eq(w, &w_pong));
|
|
||||||
info!(remaining = ws.len(), "Dead ME writer removed from pool");
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_proxy_req(
|
|
||||||
&self,
|
|
||||||
conn_id: u64,
|
|
||||||
client_addr: SocketAddr,
|
|
||||||
our_addr: SocketAddr,
|
|
||||||
data: &[u8],
|
|
||||||
proto_flags: u32,
|
|
||||||
) -> Result<()> {
|
|
||||||
let payload = build_proxy_req_payload(
|
|
||||||
conn_id, client_addr, our_addr, data,
|
|
||||||
self.proxy_tag.as_deref(), proto_flags,
|
|
||||||
);
|
|
||||||
loop {
|
|
||||||
let ws = self.writers.read().await;
|
|
||||||
if ws.is_empty() {
|
|
||||||
return Err(ProxyError::Proxy("All ME connections dead".into()));
|
|
||||||
}
|
|
||||||
let idx = self.rr.fetch_add(1, Ordering::Relaxed) as usize % ws.len();
|
|
||||||
let w = ws[idx].clone();
|
|
||||||
drop(ws);
|
|
||||||
match w.lock().await.send(&payload).await {
|
|
||||||
Ok(()) => return Ok(()),
|
|
||||||
Err(e) => {
|
|
||||||
warn!(error = %e, "ME write failed, removing dead conn");
|
|
||||||
let mut ws = self.writers.write().await;
|
|
||||||
ws.retain(|o| !Arc::ptr_eq(o, &w));
|
|
||||||
if ws.is_empty() {
|
|
||||||
return Err(ProxyError::Proxy("All ME connections dead".into()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn send_close(&self, conn_id: u64) -> Result<()> {
|
|
||||||
let ws = self.writers.read().await;
|
|
||||||
if !ws.is_empty() {
|
|
||||||
let w = ws[0].clone();
|
|
||||||
drop(ws);
|
|
||||||
let mut p = Vec::with_capacity(12);
|
|
||||||
p.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
|
|
||||||
p.extend_from_slice(&conn_id.to_le_bytes());
|
|
||||||
if let Err(e) = w.lock().await.send(&p).await {
|
|
||||||
debug!(error = %e, "ME close write failed");
|
|
||||||
let mut ws = self.writers.write().await;
|
|
||||||
ws.retain(|o| !Arc::ptr_eq(o, &w));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
self.registry.unregister(conn_id).await;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn connection_count(&self) -> usize {
|
|
||||||
self.writers.try_read().map(|w| w.len()).unwrap_or(0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== Reader Loop ==========
|
|
||||||
|
|
||||||
async fn reader_loop(
|
|
||||||
mut rd: tokio::io::ReadHalf<TcpStream>,
|
|
||||||
dk: [u8; 32],
|
|
||||||
mut div: [u8; 16],
|
|
||||||
reg: Arc<ConnRegistry>,
|
|
||||||
mut enc_leftover: BytesMut,
|
|
||||||
mut dec: BytesMut,
|
|
||||||
writer: Arc<Mutex<RpcWriter>>,
|
|
||||||
) -> Result<()> {
|
|
||||||
let mut raw = enc_leftover;
|
|
||||||
loop {
|
|
||||||
let mut tmp = [0u8; 16384];
|
|
||||||
let n = rd.read(&mut tmp).await.map_err(ProxyError::Io)?;
|
|
||||||
if n == 0 { return Ok(()); }
|
|
||||||
raw.extend_from_slice(&tmp[..n]);
|
|
||||||
|
|
||||||
// Decrypt complete 16-byte blocks
|
|
||||||
let blocks = raw.len() / 16 * 16;
|
|
||||||
if blocks > 0 {
|
|
||||||
let mut new_iv = [0u8; 16];
|
|
||||||
new_iv.copy_from_slice(&raw[blocks - 16..blocks]);
|
|
||||||
let mut chunk = vec![0u8; blocks];
|
|
||||||
chunk.copy_from_slice(&raw[..blocks]);
|
|
||||||
AesCbc::new(dk, div)
|
|
||||||
.decrypt_in_place(&mut chunk)
|
|
||||||
.map_err(|e| ProxyError::Crypto(format!("{}", e)))?;
|
|
||||||
div = new_iv;
|
|
||||||
dec.extend_from_slice(&chunk);
|
|
||||||
let _ = raw.split_to(blocks);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse RPC frames
|
|
||||||
while dec.len() >= 12 {
|
|
||||||
let fl = u32::from_le_bytes([dec[0], dec[1], dec[2], dec[3]]) as usize;
|
|
||||||
if fl == 4 { let _ = dec.split_to(4); continue; }
|
|
||||||
if fl < 12 || fl > (1 << 24) {
|
|
||||||
warn!(frame_len = fl, "Invalid RPC frame len");
|
|
||||||
dec.clear();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if dec.len() < fl { break; }
|
|
||||||
|
|
||||||
let frame = dec.split_to(fl);
|
|
||||||
let pe = fl - 4;
|
|
||||||
let ec = u32::from_le_bytes([frame[pe], frame[pe+1], frame[pe+2], frame[pe+3]]);
|
|
||||||
if crc32(&frame[..pe]) != ec {
|
|
||||||
warn!("CRC mismatch in data frame");
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let payload = &frame[8..pe];
|
|
||||||
if payload.len() < 4 { continue; }
|
|
||||||
let pt = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
|
|
||||||
let body = &payload[4..];
|
|
||||||
|
|
||||||
if pt == RPC_PROXY_ANS_U32 && body.len() >= 12 {
|
|
||||||
let flags = u32::from_le_bytes(body[0..4].try_into().unwrap());
|
|
||||||
let cid = u64::from_le_bytes(body[4..12].try_into().unwrap());
|
|
||||||
let data = Bytes::copy_from_slice(&body[12..]);
|
|
||||||
trace!(cid, len = data.len(), flags, "ANS");
|
|
||||||
reg.route(cid, MeResponse::Data(data)).await;
|
|
||||||
} else if pt == RPC_SIMPLE_ACK_U32 && body.len() >= 12 {
|
|
||||||
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
|
|
||||||
let cfm = u32::from_le_bytes(body[8..12].try_into().unwrap());
|
|
||||||
trace!(cid, cfm, "ACK");
|
|
||||||
reg.route(cid, MeResponse::Ack(cfm)).await;
|
|
||||||
} else if pt == RPC_CLOSE_EXT_U32 && body.len() >= 8 {
|
|
||||||
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
|
|
||||||
debug!(cid, "CLOSE_EXT from ME");
|
|
||||||
reg.route(cid, MeResponse::Close).await;
|
|
||||||
reg.unregister(cid).await;
|
|
||||||
} else if pt == RPC_CLOSE_CONN_U32 && body.len() >= 8 {
|
|
||||||
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
|
|
||||||
debug!(cid, "CLOSE_CONN from ME");
|
|
||||||
reg.route(cid, MeResponse::Close).await;
|
|
||||||
reg.unregister(cid).await;
|
|
||||||
} else if pt == RPC_PING_U32 && body.len() >= 8 {
|
|
||||||
let ping_id = i64::from_le_bytes(body[0..8].try_into().unwrap());
|
|
||||||
trace!(ping_id, "RPC_PING -> PONG");
|
|
||||||
let mut pong = Vec::with_capacity(12);
|
|
||||||
pong.extend_from_slice(&RPC_PONG_U32.to_le_bytes());
|
|
||||||
pong.extend_from_slice(&ping_id.to_le_bytes());
|
|
||||||
if let Err(e) = writer.lock().await.send(&pong).await {
|
|
||||||
warn!(error = %e, "PONG send failed");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
debug!(rpc_type = format_args!("0x{:08x}", pt), len = body.len(), "Unknown RPC");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ========== Proto flags ==========
|
|
||||||
|
|
||||||
/// Map ProtoTag to C-compatible RPC_PROXY_REQ transport flags.
|
|
||||||
/// C: RPC_F_COMPACT(0x40000000)=abridged, RPC_F_MEDIUM(0x20000000)=intermediate/secure
|
|
||||||
/// The 0x1000(magic) and 0x8(proxy_tag) are added inside build_proxy_req_payload.
|
|
||||||
|
|
||||||
pub fn proto_flags_for_tag(tag: crate::protocol::constants::ProtoTag) -> u32 {
|
|
||||||
use crate::protocol::constants::*;
|
|
||||||
let mut flags = RPC_FLAG_HAS_AD_TAG | RPC_FLAG_MAGIC | RPC_FLAG_EXTMODE2;
|
|
||||||
match tag {
|
|
||||||
ProtoTag::Abridged => flags | RPC_FLAG_ABRIDGED,
|
|
||||||
ProtoTag::Intermediate => flags | RPC_FLAG_INTERMEDIATE,
|
|
||||||
ProtoTag::Secure => flags | RPC_FLAG_PAD | RPC_FLAG_INTERMEDIATE,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
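The flag mapping above is easy to break when porting from the C bit names, so a table-style regression test helps. A sketch under the assumption that the RPC_FLAG_* constants and ProtoTag are importable as in the code above; the module path for proto_flags_for_tag is a guess:

// Sketch of a regression test for the flag mapping above. It only uses the
// constant names already referenced by proto_flags_for_tag; the numeric values
// stay defined in crate::protocol::constants.
#[cfg(test)]
mod proto_flag_tests {
    use crate::protocol::constants::*;
    use crate::transport::middle_proxy::proto_flags_for_tag; // assumed module path

    #[test]
    fn flags_match_c_mapping() {
        let base = RPC_FLAG_HAS_AD_TAG | RPC_FLAG_MAGIC | RPC_FLAG_EXTMODE2;
        assert_eq!(proto_flags_for_tag(ProtoTag::Abridged), base | RPC_FLAG_ABRIDGED);
        assert_eq!(proto_flags_for_tag(ProtoTag::Intermediate), base | RPC_FLAG_INTERMEDIATE);
        assert_eq!(
            proto_flags_for_tag(ProtoTag::Secure),
            base | RPC_FLAG_PAD | RPC_FLAG_INTERMEDIATE
        );
    }
}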
|
|
||||||
|
|
||||||
// ========== Health Monitor (Phase 4) ==========
|
|
||||||
|
|
||||||
pub async fn me_health_monitor(
|
|
||||||
pool: Arc<MePool>,
|
|
||||||
rng: Arc<SecureRandom>,
|
|
||||||
min_connections: usize,
|
|
||||||
) {
|
|
||||||
loop {
|
|
||||||
tokio::time::sleep(Duration::from_secs(30)).await;
|
|
||||||
let current = pool.writers.read().await.len();
|
|
||||||
if current < min_connections {
|
|
||||||
warn!(current, min = min_connections, "ME pool below minimum, reconnecting...");
|
|
||||||
let addrs = TG_MIDDLE_PROXIES_FLAT_V4.clone();
|
|
||||||
for &(ip, port) in addrs.iter() {
|
|
||||||
let needed = min_connections.saturating_sub(pool.writers.read().await.len());
|
|
||||||
if needed == 0 { break; }
|
|
||||||
for _ in 0..needed {
|
|
||||||
let addr = SocketAddr::new(ip, port);
|
|
||||||
match pool.connect_one(addr, &rng).await {
|
|
||||||
Ok(()) => info!(%addr, "ME reconnected"),
|
|
||||||
Err(e) => debug!(%addr, error = %e, "ME reconnect failed"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -176,4 +176,9 @@ impl RpcWriter {
         }
         self.writer.write_all(&buf).await.map_err(ProxyError::Io)
     }
+
+    pub(crate) async fn send_and_flush(&mut self, payload: &[u8]) -> Result<()> {
+        self.send(payload).await?;
+        self.writer.flush().await.map_err(ProxyError::Io)
+    }
 }
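send_and_flush presumably exists for latency-sensitive control frames such as the PONG reply in the reader loop, while bulk RPC_PROXY_REQ traffic keeps using send() and relies on batching. A tiny self-contained sketch of that call-site split with a stub writer (the real type is an Arc<Mutex<RpcWriter>> over an encrypted TCP half):

// Stub writer illustrating the intended split between the buffered bulk path
// and the flush-immediately control path added above.
struct StubWriter {
    buffered: Vec<Vec<u8>>,
    flushed: bool,
}

impl StubWriter {
    async fn send(&mut self, payload: &[u8]) {
        self.buffered.push(payload.to_vec()); // bulk path: may stay buffered
    }
    async fn send_and_flush(&mut self, payload: &[u8]) {
        self.send(payload).await;
        self.flushed = true; // control path: PONG / ACK must leave immediately
    }
}

#[tokio::main]
async fn main() {
    let mut w = StubWriter { buffered: Vec::new(), flushed: false };
    w.send(b"RPC_PROXY_REQ ...").await;      // throughput-oriented
    w.send_and_flush(b"RPC_PONG ...").await; // latency-oriented
    assert!(w.flushed);
}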
src/transport/middle_proxy/config_updater.rs (new file, +183)
@@ -0,0 +1,183 @@
|
use std::collections::HashMap;
|
||||||
|
use std::net::IpAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use httpdate;
|
||||||
|
use tracing::{debug, info, warn};
|
||||||
|
|
||||||
|
use crate::error::Result;
|
||||||
|
|
||||||
|
use super::MePool;
|
||||||
|
use super::secret::download_proxy_secret;
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use std::time::SystemTime;
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Default)]
|
||||||
|
pub struct ProxyConfigData {
|
||||||
|
pub map: HashMap<i32, Vec<(IpAddr, u16)>>,
|
||||||
|
pub default_dc: Option<i32>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_host_port(s: &str) -> Option<(IpAddr, u16)> {
|
||||||
|
if let Some(bracket_end) = s.rfind(']') {
|
||||||
|
if s.starts_with('[') && bracket_end + 1 < s.len() && s.as_bytes().get(bracket_end + 1) == Some(&b':') {
|
||||||
|
let host = &s[1..bracket_end];
|
||||||
|
let port_str = &s[bracket_end + 2..];
|
||||||
|
let ip = host.parse::<IpAddr>().ok()?;
|
||||||
|
let port = port_str.parse::<u16>().ok()?;
|
||||||
|
return Some((ip, port));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let idx = s.rfind(':')?;
|
||||||
|
let host = &s[..idx];
|
||||||
|
let port_str = &s[idx + 1..];
|
||||||
|
let ip = host.parse::<IpAddr>().ok()?;
|
||||||
|
let port = port_str.parse::<u16>().ok()?;
|
||||||
|
Some((ip, port))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_proxy_line(line: &str) -> Option<(i32, IpAddr, u16)> {
|
||||||
|
// Accepts lines like:
|
||||||
|
// proxy_for 4 91.108.4.195:8888;
|
||||||
|
// proxy_for 2 [2001:67c:04e8:f002::d]:80;
|
||||||
|
// proxy_for 2 2001:67c:04e8:f002::d:80;
|
||||||
|
let trimmed = line.trim();
|
||||||
|
if !trimmed.starts_with("proxy_for") {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
// Capture everything between dc and trailing ';'
|
||||||
|
let without_prefix = trimmed.trim_start_matches("proxy_for").trim();
|
||||||
|
let mut parts = without_prefix.split_whitespace();
|
||||||
|
let dc_str = parts.next()?;
|
||||||
|
let rest = parts.next()?;
|
||||||
|
let host_port = rest.trim_end_matches(';');
|
||||||
|
let dc = dc_str.parse::<i32>().ok()?;
|
||||||
|
let (ip, port) = parse_host_port(host_port)?;
|
||||||
|
Some((dc, ip, port))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn fetch_proxy_config(url: &str) -> Result<ProxyConfigData> {
|
||||||
|
let resp = reqwest::get(url)
|
||||||
|
.await
|
||||||
|
.map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config GET failed: {e}")))?
|
||||||
|
;
|
||||||
|
|
||||||
|
if let Some(date) = resp.headers().get(reqwest::header::DATE) {
|
||||||
|
if let Ok(date_str) = date.to_str() {
|
||||||
|
if let Ok(server_time) = httpdate::parse_http_date(date_str) {
|
||||||
|
if let Ok(skew) = SystemTime::now().duration_since(server_time).or_else(|e| {
|
||||||
|
server_time.duration_since(SystemTime::now()).map_err(|_| e)
|
||||||
|
}) {
|
||||||
|
let skew_secs = skew.as_secs();
|
||||||
|
if skew_secs > 60 {
|
||||||
|
warn!(skew_secs, "Time skew >60s detected from fetch_proxy_config Date header");
|
||||||
|
} else if skew_secs > 30 {
|
||||||
|
warn!(skew_secs, "Time skew >30s detected from fetch_proxy_config Date header");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let text = resp
|
||||||
|
.text()
|
||||||
|
.await
|
||||||
|
.map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config read failed: {e}")))?;
|
||||||
|
|
||||||
|
let mut map: HashMap<i32, Vec<(IpAddr, u16)>> = HashMap::new();
|
||||||
|
for line in text.lines() {
|
||||||
|
if let Some((dc, ip, port)) = parse_proxy_line(line) {
|
||||||
|
map.entry(dc).or_default().push((ip, port));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let default_dc = text
|
||||||
|
.lines()
|
||||||
|
.find_map(|l| {
|
||||||
|
let t = l.trim();
|
||||||
|
if let Some(rest) = t.strip_prefix("default") {
|
||||||
|
return rest
|
||||||
|
.trim()
|
||||||
|
.trim_end_matches(';')
|
||||||
|
.parse::<i32>()
|
||||||
|
.ok();
|
||||||
|
}
|
||||||
|
None
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(ProxyConfigData { map, default_dc })
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn me_config_updater(pool: Arc<MePool>, rng: Arc<SecureRandom>, interval: Duration) {
|
||||||
|
let mut tick = tokio::time::interval(interval);
|
||||||
|
// skip immediate tick to avoid double-fetch right after startup
|
||||||
|
tick.tick().await;
|
||||||
|
loop {
|
||||||
|
tick.tick().await;
|
||||||
|
|
||||||
|
// Update proxy config v4
|
||||||
|
if let Ok(cfg) = fetch_proxy_config("https://core.telegram.org/getProxyConfig").await {
|
||||||
|
let changed = pool.update_proxy_maps(cfg.map.clone(), None).await;
|
||||||
|
if let Some(dc) = cfg.default_dc {
|
||||||
|
pool.default_dc.store(dc, std::sync::atomic::Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
if changed {
|
||||||
|
info!("ME config updated (v4), reconciling connections");
|
||||||
|
pool.reconcile_connections(&rng).await;
|
||||||
|
} else {
|
||||||
|
debug!("ME config v4 unchanged");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
warn!("getProxyConfig update failed");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update proxy config v6 (optional)
|
||||||
|
if let Ok(cfg_v6) = fetch_proxy_config("https://core.telegram.org/getProxyConfigV6").await {
|
||||||
|
let _ = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update proxy-secret
|
||||||
|
match download_proxy_secret().await {
|
||||||
|
Ok(secret) => {
|
||||||
|
if pool.update_secret(secret).await {
|
||||||
|
info!("proxy-secret updated and pool reconnect scheduled");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => warn!(error = %e, "proxy-secret update failed"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_ipv6_bracketed() {
|
||||||
|
let line = "proxy_for 2 [2001:67c:04e8:f002::d]:80;";
|
||||||
|
let res = parse_proxy_line(line).unwrap();
|
||||||
|
assert_eq!(res.0, 2);
|
||||||
|
assert_eq!(res.1, "2001:67c:04e8:f002::d".parse::<IpAddr>().unwrap());
|
||||||
|
assert_eq!(res.2, 80);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_ipv6_plain() {
|
||||||
|
let line = "proxy_for 2 2001:67c:04e8:f002::d:80;";
|
||||||
|
let res = parse_proxy_line(line).unwrap();
|
||||||
|
assert_eq!(res.0, 2);
|
||||||
|
assert_eq!(res.1, "2001:67c:04e8:f002::d".parse::<IpAddr>().unwrap());
|
||||||
|
assert_eq!(res.2, 80);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_ipv4() {
|
||||||
|
let line = "proxy_for 4 91.108.4.195:8888;";
|
||||||
|
let res = parse_proxy_line(line).unwrap();
|
||||||
|
assert_eq!(res.0, 4);
|
||||||
|
assert_eq!(res.1, "91.108.4.195".parse::<IpAddr>().unwrap());
|
||||||
|
assert_eq!(res.2, 8888);
|
||||||
|
}
|
||||||
|
}
|
||||||
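me_config_updater above deliberately consumes the interval's immediate first tick so the config is not re-fetched right after startup. A self-contained sketch of that pattern; the job closure and timings here are placeholders, not the updater's real work:

use std::time::Duration;

// Sketch of the "skip the immediate tick" pattern used by me_config_updater:
// tokio's interval fires once right away, so the first tick is consumed before
// entering the loop to avoid a double fetch at startup.
async fn periodic(interval: Duration, mut job: impl FnMut()) {
    let mut tick = tokio::time::interval(interval);
    tick.tick().await; // completes immediately; swallow it
    loop {
        tick.tick().await;
        job();
    }
}

#[tokio::main]
async fn main() {
    // Run the updater-style loop in the background; the job is a stub here.
    tokio::spawn(periodic(Duration::from_secs(3600), || println!("refresh ME config")));
    // ... rest of the proxy keeps running ...
    tokio::time::sleep(Duration::from_millis(10)).await;
}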
src/transport/middle_proxy/handshake.rs (new file, +439)
@@ -0,0 +1,439 @@
|
use std::net::{IpAddr, SocketAddr};
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
use socket2::{SockRef, TcpKeepalive};
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
use libc;
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
use std::os::fd::{AsRawFd, RawFd};
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
use std::os::raw::c_int;
|
||||||
|
|
||||||
|
use bytes::BytesMut;
|
||||||
|
use tokio::io::{AsyncReadExt, AsyncWriteExt, ReadHalf, WriteHalf};
|
||||||
|
use tokio::net::{TcpStream, TcpSocket};
|
||||||
|
use tokio::time::timeout;
|
||||||
|
use tracing::{debug, info, warn};
|
||||||
|
|
||||||
|
use crate::crypto::{SecureRandom, build_middleproxy_prekey, derive_middleproxy_keys, sha256};
|
||||||
|
use crate::error::{ProxyError, Result};
|
||||||
|
use crate::network::IpFamily;
|
||||||
|
use crate::protocol::constants::{
|
||||||
|
ME_CONNECT_TIMEOUT_SECS, ME_HANDSHAKE_TIMEOUT_SECS, RPC_CRYPTO_AES_U32, RPC_HANDSHAKE_ERROR_U32,
|
||||||
|
RPC_HANDSHAKE_U32, RPC_PING_U32, RPC_PONG_U32, RPC_NONCE_U32,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::codec::{
|
||||||
|
build_handshake_payload, build_nonce_payload, build_rpc_frame, cbc_decrypt_inplace,
|
||||||
|
cbc_encrypt_padded, parse_nonce_payload, read_rpc_frame_plaintext,
|
||||||
|
};
|
||||||
|
use super::wire::{extract_ip_material, IpMaterial};
|
||||||
|
use super::MePool;
|
||||||
|
|
||||||
|
/// Result of a successful ME handshake with timings.
|
||||||
|
pub(crate) struct HandshakeOutput {
|
||||||
|
pub rd: ReadHalf<TcpStream>,
|
||||||
|
pub wr: WriteHalf<TcpStream>,
|
||||||
|
pub read_key: [u8; 32],
|
||||||
|
pub read_iv: [u8; 16],
|
||||||
|
pub write_key: [u8; 32],
|
||||||
|
pub write_iv: [u8; 16],
|
||||||
|
pub handshake_ms: f64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MePool {
|
||||||
|
/// TCP connect with timeout + return RTT in milliseconds.
|
||||||
|
pub(crate) async fn connect_tcp(&self, addr: SocketAddr) -> Result<(TcpStream, f64)> {
|
||||||
|
let start = Instant::now();
|
||||||
|
let connect_fut = async {
|
||||||
|
if addr.is_ipv6() {
|
||||||
|
if let Some(v6) = self.detected_ipv6 {
|
||||||
|
match TcpSocket::new_v6() {
|
||||||
|
Ok(sock) => {
|
||||||
|
if let Err(e) = sock.bind(SocketAddr::new(IpAddr::V6(v6), 0)) {
|
||||||
|
debug!(error = %e, bind_ip = %v6, "ME IPv6 bind failed, falling back to default bind");
|
||||||
|
} else {
|
||||||
|
match sock.connect(addr).await {
|
||||||
|
Ok(stream) => return Ok(stream),
|
||||||
|
Err(e) => debug!(error = %e, target = %addr, "ME IPv6 bound connect failed, retrying default connect"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => debug!(error = %e, "ME IPv6 socket creation failed, falling back to default connect"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
TcpStream::connect(addr).await
|
||||||
|
};
|
||||||
|
|
||||||
|
let stream = timeout(Duration::from_secs(ME_CONNECT_TIMEOUT_SECS), connect_fut)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ProxyError::ConnectionTimeout { addr: addr.to_string() })??;
|
||||||
|
let connect_ms = start.elapsed().as_secs_f64() * 1000.0;
|
||||||
|
stream.set_nodelay(true).ok();
|
||||||
|
if let Err(e) = Self::configure_keepalive(&stream) {
|
||||||
|
warn!(error = %e, "ME keepalive setup failed");
|
||||||
|
}
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
if let Err(e) = Self::configure_user_timeout(stream.as_raw_fd()) {
|
||||||
|
warn!(error = %e, "ME TCP_USER_TIMEOUT setup failed");
|
||||||
|
}
|
||||||
|
Ok((stream, connect_ms))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn configure_keepalive(stream: &TcpStream) -> std::io::Result<()> {
|
||||||
|
let sock = SockRef::from(stream);
|
||||||
|
let ka = TcpKeepalive::new()
|
||||||
|
.with_time(Duration::from_secs(30))
|
||||||
|
.with_interval(Duration::from_secs(10))
|
||||||
|
.with_retries(3);
|
||||||
|
sock.set_tcp_keepalive(&ka)?;
|
||||||
|
sock.set_keepalive(true)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(target_os = "linux")]
|
||||||
|
fn configure_user_timeout(fd: RawFd) -> std::io::Result<()> {
|
||||||
|
let timeout_ms: c_int = 30_000;
|
||||||
|
let rc = unsafe {
|
||||||
|
libc::setsockopt(
|
||||||
|
fd,
|
||||||
|
libc::IPPROTO_TCP,
|
||||||
|
libc::TCP_USER_TIMEOUT,
|
||||||
|
&timeout_ms as *const _ as *const libc::c_void,
|
||||||
|
std::mem::size_of_val(&timeout_ms) as libc::socklen_t,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
if rc != 0 {
|
||||||
|
return Err(std::io::Error::last_os_error());
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Perform full ME RPC handshake on an established TCP stream.
|
||||||
|
/// Returns cipher keys/ivs and split halves; does not register writer.
|
||||||
|
pub(crate) async fn handshake_only(
|
||||||
|
&self,
|
||||||
|
stream: TcpStream,
|
||||||
|
addr: SocketAddr,
|
||||||
|
rng: &SecureRandom,
|
||||||
|
) -> Result<HandshakeOutput> {
|
||||||
|
let hs_start = Instant::now();
|
||||||
|
|
||||||
|
let local_addr = stream.local_addr().map_err(ProxyError::Io)?;
|
||||||
|
let peer_addr = stream.peer_addr().map_err(ProxyError::Io)?;
|
||||||
|
|
||||||
|
let _ = self.maybe_detect_nat_ip(local_addr.ip()).await;
|
||||||
|
let family = if local_addr.ip().is_ipv4() {
|
||||||
|
IpFamily::V4
|
||||||
|
} else {
|
||||||
|
IpFamily::V6
|
||||||
|
};
|
||||||
|
let reflected = if self.nat_probe {
|
||||||
|
self.maybe_reflect_public_addr(family).await
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let local_addr_nat = self.translate_our_addr_with_reflection(local_addr, reflected);
|
||||||
|
let peer_addr_nat = SocketAddr::new(self.translate_ip_for_nat(peer_addr.ip()), peer_addr.port());
|
||||||
|
let (mut rd, mut wr) = tokio::io::split(stream);
|
||||||
|
|
||||||
|
let my_nonce: [u8; 16] = rng.bytes(16).try_into().unwrap();
|
||||||
|
let crypto_ts = std::time::SystemTime::now()
|
||||||
|
.duration_since(std::time::UNIX_EPOCH)
|
||||||
|
.unwrap_or_default()
|
||||||
|
.as_secs() as u32;
|
||||||
|
|
||||||
|
let ks = self.key_selector().await;
|
||||||
|
let nonce_payload = build_nonce_payload(ks, crypto_ts, &my_nonce);
|
||||||
|
let nonce_frame = build_rpc_frame(-2, &nonce_payload);
|
||||||
|
let dump = hex_dump(&nonce_frame[..nonce_frame.len().min(44)]);
|
||||||
|
debug!(
|
||||||
|
key_selector = format_args!("0x{ks:08x}"),
|
||||||
|
crypto_ts,
|
||||||
|
frame_len = nonce_frame.len(),
|
||||||
|
nonce_frame_hex = %dump,
|
||||||
|
"Sending ME nonce frame"
|
||||||
|
);
|
||||||
|
wr.write_all(&nonce_frame).await.map_err(ProxyError::Io)?;
|
||||||
|
wr.flush().await.map_err(ProxyError::Io)?;
|
||||||
|
|
||||||
|
let (srv_seq, srv_nonce_payload) = timeout(
|
||||||
|
Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS),
|
||||||
|
read_rpc_frame_plaintext(&mut rd),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ProxyError::TgHandshakeTimeout)??;
|
||||||
|
|
||||||
|
if srv_seq != -2 {
|
||||||
|
return Err(ProxyError::InvalidHandshake(format!("Expected seq=-2, got {srv_seq}")));
|
||||||
|
}
|
||||||
|
|
||||||
|
let (srv_key_select, schema, srv_ts, srv_nonce) = parse_nonce_payload(&srv_nonce_payload)?;
|
||||||
|
if schema != RPC_CRYPTO_AES_U32 {
|
||||||
|
warn!(schema = format_args!("0x{schema:08x}"), "Unsupported ME crypto schema");
|
||||||
|
return Err(ProxyError::InvalidHandshake(format!(
|
||||||
|
"Unsupported crypto schema: 0x{schema:x}"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
if srv_key_select != ks {
|
||||||
|
return Err(ProxyError::InvalidHandshake(format!(
|
||||||
|
"Server key_select 0x{srv_key_select:08x} != client 0x{ks:08x}"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let skew = crypto_ts.abs_diff(srv_ts);
|
||||||
|
if skew > 30 {
|
||||||
|
return Err(ProxyError::InvalidHandshake(format!(
|
||||||
|
"nonce crypto_ts skew too large: client={crypto_ts}, server={srv_ts}, skew={skew}s"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
info!(
|
||||||
|
%local_addr,
|
||||||
|
%local_addr_nat,
|
||||||
|
reflected_ip = reflected.map(|r| r.ip()).as_ref().map(ToString::to_string),
|
||||||
|
%peer_addr,
|
||||||
|
%peer_addr_nat,
|
||||||
|
key_selector = format_args!("0x{ks:08x}"),
|
||||||
|
crypto_schema = format_args!("0x{schema:08x}"),
|
||||||
|
skew_secs = skew,
|
||||||
|
"ME key derivation parameters"
|
||||||
|
);
|
||||||
|
|
||||||
|
let ts_bytes = crypto_ts.to_le_bytes();
|
||||||
|
let server_port_bytes = peer_addr_nat.port().to_le_bytes();
|
||||||
|
let client_port_bytes = local_addr_nat.port().to_le_bytes();
|
||||||
|
|
||||||
|
let server_ip = extract_ip_material(peer_addr_nat);
|
||||||
|
let client_ip = extract_ip_material(local_addr_nat);
|
||||||
|
|
||||||
|
let (srv_ip_opt, clt_ip_opt, clt_v6_opt, srv_v6_opt, hs_our_ip, hs_peer_ip) = match (server_ip, client_ip) {
|
||||||
|
(IpMaterial::V4(mut srv), IpMaterial::V4(mut clt)) => {
|
||||||
|
srv.reverse();
|
||||||
|
clt.reverse();
|
||||||
|
(Some(srv), Some(clt), None, None, clt, srv)
|
||||||
|
}
|
||||||
|
(IpMaterial::V6(srv), IpMaterial::V6(clt)) => {
|
||||||
|
let zero = [0u8; 4];
|
||||||
|
(None, None, Some(clt), Some(srv), zero, zero)
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
return Err(ProxyError::InvalidHandshake(
|
||||||
|
"mixed IPv4/IPv6 endpoints are not supported for ME key derivation".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let diag_level: u8 = std::env::var("ME_DIAG").ok().and_then(|v| v.parse().ok()).unwrap_or(0);
|
||||||
|
|
||||||
|
let secret: Vec<u8> = self.proxy_secret.read().await.clone();
|
||||||
|
|
||||||
|
let prekey_client = build_middleproxy_prekey(
|
||||||
|
&srv_nonce,
|
||||||
|
&my_nonce,
|
||||||
|
&ts_bytes,
|
||||||
|
srv_ip_opt.as_ref().map(|x| &x[..]),
|
||||||
|
&client_port_bytes,
|
||||||
|
b"CLIENT",
|
||||||
|
clt_ip_opt.as_ref().map(|x| &x[..]),
|
||||||
|
&server_port_bytes,
|
||||||
|
&secret,
|
||||||
|
clt_v6_opt.as_ref(),
|
||||||
|
srv_v6_opt.as_ref(),
|
||||||
|
);
|
||||||
|
let prekey_server = build_middleproxy_prekey(
|
||||||
|
&srv_nonce,
|
||||||
|
&my_nonce,
|
||||||
|
&ts_bytes,
|
||||||
|
srv_ip_opt.as_ref().map(|x| &x[..]),
|
||||||
|
&client_port_bytes,
|
||||||
|
b"SERVER",
|
||||||
|
clt_ip_opt.as_ref().map(|x| &x[..]),
|
||||||
|
&server_port_bytes,
|
||||||
|
&secret,
|
||||||
|
clt_v6_opt.as_ref(),
|
||||||
|
srv_v6_opt.as_ref(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let (wk, wi) = derive_middleproxy_keys(
|
||||||
|
&srv_nonce,
|
||||||
|
&my_nonce,
|
||||||
|
&ts_bytes,
|
||||||
|
srv_ip_opt.as_ref().map(|x| &x[..]),
|
||||||
|
&client_port_bytes,
|
||||||
|
b"CLIENT",
|
||||||
|
clt_ip_opt.as_ref().map(|x| &x[..]),
|
||||||
|
&server_port_bytes,
|
||||||
|
&secret,
|
||||||
|
clt_v6_opt.as_ref(),
|
||||||
|
srv_v6_opt.as_ref(),
|
||||||
|
);
|
||||||
|
let (rk, ri) = derive_middleproxy_keys(
|
||||||
|
&srv_nonce,
|
||||||
|
&my_nonce,
|
||||||
|
&ts_bytes,
|
||||||
|
srv_ip_opt.as_ref().map(|x| &x[..]),
|
||||||
|
&client_port_bytes,
|
||||||
|
b"SERVER",
|
||||||
|
clt_ip_opt.as_ref().map(|x| &x[..]),
|
||||||
|
&server_port_bytes,
|
||||||
|
&secret,
|
||||||
|
clt_v6_opt.as_ref(),
|
||||||
|
srv_v6_opt.as_ref(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let hs_payload = build_handshake_payload(hs_our_ip, local_addr.port(), hs_peer_ip, peer_addr.port());
|
||||||
|
let hs_frame = build_rpc_frame(-1, &hs_payload);
|
||||||
|
if diag_level >= 1 {
|
||||||
|
info!(
|
||||||
|
write_key = %hex_dump(&wk),
|
||||||
|
write_iv = %hex_dump(&wi),
|
||||||
|
read_key = %hex_dump(&rk),
|
||||||
|
read_iv = %hex_dump(&ri),
|
||||||
|
srv_ip = %srv_ip_opt.map(|ip| hex_dump(&ip)).unwrap_or_default(),
|
||||||
|
clt_ip = %clt_ip_opt.map(|ip| hex_dump(&ip)).unwrap_or_default(),
|
||||||
|
srv_port = %hex_dump(&server_port_bytes),
|
||||||
|
clt_port = %hex_dump(&client_port_bytes),
|
||||||
|
crypto_ts = %hex_dump(&ts_bytes),
|
||||||
|
nonce_srv = %hex_dump(&srv_nonce),
|
||||||
|
nonce_clt = %hex_dump(&my_nonce),
|
||||||
|
prekey_sha256_client = %hex_dump(&sha256(&prekey_client)),
|
||||||
|
prekey_sha256_server = %hex_dump(&sha256(&prekey_server)),
|
||||||
|
hs_plain = %hex_dump(&hs_frame),
|
||||||
|
proxy_secret_sha256 = %hex_dump(&sha256(&secret)),
|
||||||
|
"ME diag: derived keys and handshake plaintext"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if diag_level >= 2 {
|
||||||
|
info!(
|
||||||
|
prekey_client = %hex_dump(&prekey_client),
|
||||||
|
prekey_server = %hex_dump(&prekey_server),
|
||||||
|
"ME diag: full prekey buffers"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let (encrypted_hs, mut write_iv) = cbc_encrypt_padded(&wk, &wi, &hs_frame)?;
|
||||||
|
if diag_level >= 1 {
|
||||||
|
info!(
|
||||||
|
hs_cipher = %hex_dump(&encrypted_hs),
|
||||||
|
"ME diag: handshake ciphertext"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
wr.write_all(&encrypted_hs).await.map_err(ProxyError::Io)?;
|
||||||
|
wr.flush().await.map_err(ProxyError::Io)?;
|
||||||
|
|
||||||
|
let deadline = Instant::now() + Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS);
|
||||||
|
let mut enc_buf = BytesMut::with_capacity(256);
|
||||||
|
let mut dec_buf = BytesMut::with_capacity(256);
|
||||||
|
let mut read_iv = ri;
|
||||||
|
let mut handshake_ok = false;
|
||||||
|
|
||||||
|
while Instant::now() < deadline && !handshake_ok {
|
||||||
|
let remaining = deadline - Instant::now();
|
||||||
|
let mut tmp = [0u8; 256];
|
||||||
|
let n = match timeout(remaining, rd.read(&mut tmp)).await {
|
||||||
|
Ok(Ok(0)) => {
|
||||||
|
return Err(ProxyError::Io(std::io::Error::new(
|
||||||
|
std::io::ErrorKind::UnexpectedEof,
|
||||||
|
"ME closed during handshake",
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
Ok(Ok(n)) => n,
|
||||||
|
Ok(Err(e)) => return Err(ProxyError::Io(e)),
|
||||||
|
Err(_) => return Err(ProxyError::TgHandshakeTimeout),
|
||||||
|
};
|
||||||
|
|
||||||
|
enc_buf.extend_from_slice(&tmp[..n]);
|
||||||
|
|
||||||
|
let blocks = enc_buf.len() / 16 * 16;
|
||||||
|
if blocks > 0 {
|
||||||
|
let mut chunk = vec![0u8; blocks];
|
||||||
|
chunk.copy_from_slice(&enc_buf[..blocks]);
|
||||||
|
read_iv = cbc_decrypt_inplace(&rk, &read_iv, &mut chunk)?;
|
||||||
|
dec_buf.extend_from_slice(&chunk);
|
||||||
|
let _ = enc_buf.split_to(blocks);
|
||||||
|
}
|
||||||
|
|
||||||
|
while dec_buf.len() >= 4 {
|
||||||
|
let fl = u32::from_le_bytes(dec_buf[0..4].try_into().unwrap()) as usize;
|
||||||
|
|
||||||
|
if fl == 4 {
|
||||||
|
let _ = dec_buf.split_to(4);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if !(12..=(1 << 24)).contains(&fl) {
|
||||||
|
return Err(ProxyError::InvalidHandshake(format!(
|
||||||
|
"Bad HS response frame len: {fl}"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
if dec_buf.len() < fl {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
let frame = dec_buf.split_to(fl);
|
||||||
|
let pe = fl - 4;
|
||||||
|
let ec = u32::from_le_bytes(frame[pe..pe + 4].try_into().unwrap());
|
||||||
|
let ac = crate::crypto::crc32(&frame[..pe]);
|
||||||
|
if ec != ac {
|
||||||
|
return Err(ProxyError::InvalidHandshake(format!(
|
||||||
|
"HS CRC mismatch: 0x{ec:08x} vs 0x{ac:08x}"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let hs_type = u32::from_le_bytes(frame[8..12].try_into().unwrap());
|
||||||
|
if hs_type == RPC_HANDSHAKE_ERROR_U32 {
|
||||||
|
let err_code = if frame.len() >= 16 {
|
||||||
|
i32::from_le_bytes(frame[12..16].try_into().unwrap())
|
||||||
|
} else {
|
||||||
|
-1
|
||||||
|
};
|
||||||
|
return Err(ProxyError::InvalidHandshake(format!(
|
||||||
|
"ME rejected handshake (error={err_code})"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
if hs_type != RPC_HANDSHAKE_U32 {
|
||||||
|
return Err(ProxyError::InvalidHandshake(format!(
|
||||||
|
"Expected HANDSHAKE 0x{RPC_HANDSHAKE_U32:08x}, got 0x{hs_type:08x}"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
handshake_ok = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !handshake_ok {
|
||||||
|
return Err(ProxyError::TgHandshakeTimeout);
|
||||||
|
}
|
||||||
|
|
||||||
|
let handshake_ms = hs_start.elapsed().as_secs_f64() * 1000.0;
|
||||||
|
info!(%addr, "RPC handshake OK");
|
||||||
|
|
||||||
|
Ok(HandshakeOutput {
|
||||||
|
rd,
|
||||||
|
wr,
|
||||||
|
read_key: rk,
|
||||||
|
read_iv,
|
||||||
|
write_key: wk,
|
||||||
|
write_iv,
|
||||||
|
handshake_ms,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hex_dump(data: &[u8]) -> String {
|
||||||
|
const MAX: usize = 64;
|
||||||
|
let mut out = String::with_capacity(data.len() * 2 + 3);
|
||||||
|
for (i, b) in data.iter().take(MAX).enumerate() {
|
||||||
|
if i > 0 {
|
||||||
|
out.push(' ');
|
||||||
|
}
|
||||||
|
out.push_str(&format!("{b:02x}"));
|
||||||
|
}
|
||||||
|
if data.len() > MAX {
|
||||||
|
out.push_str(" …");
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
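One detail worth calling out in `handshake_only` above is the read loop: ciphertext is handed to AES-CBC only in whole 16-byte blocks, and any tail shorter than a block stays buffered until more bytes arrive from the socket. Below is a small self-contained sketch of just that buffering pattern, with a plain Vec<u8> standing in for BytesMut and a placeholder in place of the real `cbc_decrypt_inplace`.

/// Move every complete 16-byte block from `enc_buf` to `dec_buf`, leaving the remainder buffered.
fn drain_full_blocks(enc_buf: &mut Vec<u8>, dec_buf: &mut Vec<u8>) {
    let blocks = enc_buf.len() / 16 * 16; // largest multiple of 16 currently available
    if blocks == 0 {
        return;
    }
    let mut chunk: Vec<u8> = enc_buf.drain(..blocks).collect();
    decrypt_blocks(&mut chunk); // placeholder for cbc_decrypt_inplace
    dec_buf.extend_from_slice(&chunk);
}

fn decrypt_blocks(_chunk: &mut [u8]) {
    // Identity here; the real code runs AES-256-CBC and carries the IV forward.
}

fn main() {
    let mut enc = vec![0u8; 37]; // 2 full blocks + 5 leftover bytes
    let mut dec = Vec::new();
    drain_full_blocks(&mut enc, &mut dec);
    assert_eq!((dec.len(), enc.len()), (32, 5));
    println!("decrypted {} bytes, {} bytes still buffered", dec.len(), enc.len());
}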
@@ -1,38 +1,174 @@
use std::collections::{HashMap, HashSet};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{Duration, Instant};

use tracing::{debug, info, warn};
use rand::seq::SliceRandom;

use crate::crypto::SecureRandom;
use crate::network::IpFamily;

use super::MePool;

pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_connections: usize) {
    let mut backoff: HashMap<(i32, IpFamily), u64> = HashMap::new();
    let mut last_attempt: HashMap<(i32, IpFamily), Instant> = HashMap::new();
    let mut inflight_single: HashSet<(i32, IpFamily)> = HashSet::new();
    loop {
        tokio::time::sleep(Duration::from_secs(30)).await;
        check_family(
            IpFamily::V4,
            &pool,
            &rng,
            &mut backoff,
            &mut last_attempt,
            &mut inflight_single,
        )
        .await;
        check_family(
            IpFamily::V6,
            &pool,
            &rng,
            &mut backoff,
            &mut last_attempt,
            &mut inflight_single,
        )
        .await;
    }
}

async fn check_family(
    family: IpFamily,
    pool: &Arc<MePool>,
    rng: &Arc<SecureRandom>,
    backoff: &mut HashMap<(i32, IpFamily), u64>,
    last_attempt: &mut HashMap<(i32, IpFamily), Instant>,
    inflight_single: &mut HashSet<(i32, IpFamily)>,
) {
    let enabled = match family {
        IpFamily::V4 => pool.decision.ipv4_me,
        IpFamily::V6 => pool.decision.ipv6_me,
    };
    if !enabled {
        return;
    }

    let map = match family {
        IpFamily::V4 => pool.proxy_map_v4.read().await.clone(),
        IpFamily::V6 => pool.proxy_map_v6.read().await.clone(),
    };
    let writer_addrs: HashSet<SocketAddr> = pool
        .writers
        .read()
        .await
        .iter()
        .map(|w| w.addr)
        .collect();

    let entries: Vec<(i32, Vec<SocketAddr>)> = map
        .iter()
        .map(|(dc, addrs)| {
            let list = addrs
                .iter()
                .map(|(ip, port)| SocketAddr::new(*ip, *port))
                .collect::<Vec<_>>();
            (*dc, list)
        })
        .collect();

    for (dc, dc_addrs) in entries {
        let has_coverage = dc_addrs.iter().any(|a| writer_addrs.contains(a));
        if has_coverage {
            inflight_single.remove(&(dc, family));
            continue;
        }
        let key = (dc, family);
        let delay = *backoff.get(&key).unwrap_or(&30);
        let now = Instant::now();
        if let Some(last) = last_attempt.get(&key) {
            if now.duration_since(*last).as_secs() < delay {
                continue;
            }
        }
        if dc_addrs.len() == 1 {
            // Single ME address: fast retries then slower background retries.
            if inflight_single.contains(&key) {
                continue;
            }
            inflight_single.insert(key);
            let addr = dc_addrs[0];
            let dc_id = dc;
            let pool_clone = pool.clone();
            let rng_clone = rng.clone();
            let timeout = pool.me_one_timeout;
            let quick_attempts = pool.me_one_retry.max(1);
            tokio::spawn(async move {
                let mut success = false;
                for _ in 0..quick_attempts {
                    let res = tokio::time::timeout(timeout, pool_clone.connect_one(addr, rng_clone.as_ref())).await;
                    match res {
                        Ok(Ok(())) => {
                            info!(%addr, dc = %dc_id, ?family, "ME reconnected for DC coverage");
                            success = true;
                            break;
                        }
                        Ok(Err(e)) => debug!(%addr, dc = %dc_id, error = %e, ?family, "ME reconnect failed"),
                        Err(_) => debug!(%addr, dc = %dc_id, ?family, "ME reconnect timed out"),
                    }
                    tokio::time::sleep(Duration::from_millis(1000)).await;
                }
                if success {
                    return;
                }
                let timeout_ms = timeout.as_millis();
                warn!(
                    dc = %dc_id,
                    ?family,
                    attempts = quick_attempts,
                    timeout_ms,
                    "DC={} has no ME coverage: {} tries * {} ms... retry in 5 seconds...",
                    dc_id,
                    quick_attempts,
                    timeout_ms
                );
                loop {
                    tokio::time::sleep(Duration::from_secs(5)).await;
                    let res = tokio::time::timeout(timeout, pool_clone.connect_one(addr, rng_clone.as_ref())).await;
                    match res {
                        Ok(Ok(())) => {
                            info!(%addr, dc = %dc_id, ?family, "ME reconnected for DC coverage");
                            break;
                        }
                        Ok(Err(e)) => debug!(%addr, dc = %dc_id, error = %e, ?family, "ME reconnect failed"),
                        Err(_) => debug!(%addr, dc = %dc_id, ?family, "ME reconnect timed out"),
                    }
                }
                // will drop inflight flag in outer loop when coverage detected
            });
            continue;
        }

        warn!(dc = %dc, delay, ?family, "DC has no ME coverage, reconnecting...");
        let mut shuffled = dc_addrs.clone();
        shuffled.shuffle(&mut rand::rng());
        let mut reconnected = false;
        for addr in shuffled {
            match pool.connect_one(addr, rng.as_ref()).await {
                Ok(()) => {
                    info!(%addr, dc = %dc, ?family, "ME reconnected for DC coverage");
                    backoff.insert(key, 30);
                    last_attempt.insert(key, now);
                    reconnected = true;
                    break;
                }
                Err(e) => debug!(%addr, dc = %dc, error = %e, ?family, "ME reconnect failed"),
            }
        }
        if !reconnected {
            let next = (*backoff.get(&key).unwrap_or(&30)).saturating_mul(2).min(300);
            backoff.insert(key, next);
            last_attempt.insert(key, now);
        }
    }
}
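The reconnect schedule in `check_family` is a capped exponential backoff per (DC, family) key: 30 s base, doubled after each failed pass, capped at 300 s, and reset to 30 s once a connection succeeds. A compact standalone sketch of just that schedule (a u8 stands in for the crate's IpFamily):

use std::collections::HashMap;

/// Compute and record the next delay for a (dc, family) key.
fn next_backoff(backoff: &mut HashMap<(i32, u8), u64>, key: (i32, u8), succeeded: bool) -> u64 {
    let next = if succeeded {
        30
    } else {
        backoff.get(&key).copied().unwrap_or(30).saturating_mul(2).min(300)
    };
    backoff.insert(key, next);
    next
}

fn main() {
    let mut b = HashMap::new();
    let key = (2, 4); // DC 2, IPv4
    let delays: Vec<u64> = (0..5).map(|_| next_backoff(&mut b, key, false)).collect();
    assert_eq!(delays, vec![60, 120, 240, 300, 300]);
    println!("failure delays: {delays:?}, after success: {}", next_backoff(&mut b, key, true));
}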
@@ -1,21 +1,29 @@
//! Middle Proxy RPC transport.

mod codec;
mod handshake;
mod health;
mod pool;
mod pool_nat;
mod ping;
mod reader;
mod registry;
mod send;
mod secret;
mod rotation;
mod config_updater;
mod wire;

use bytes::Bytes;

pub use health::me_health_monitor;
pub use ping::{run_me_ping, format_sample_line, MePingReport, MePingSample, MePingFamily};
pub use pool::MePool;
pub use pool_nat::{stun_probe, detect_public_ip};
pub use registry::ConnRegistry;
pub use secret::fetch_proxy_secret;
pub use config_updater::{fetch_proxy_config, me_config_updater};
pub use rotation::me_rotation_task;
pub use wire::proto_flags_for_tag;

#[derive(Debug)]
src/transport/middle_proxy/ping.rs (new file, 173 lines)
@@ -0,0 +1,173 @@
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;

use crate::crypto::SecureRandom;
use crate::error::ProxyError;

use super::MePool;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MePingFamily {
    V4,
    V6,
}

#[derive(Debug, Clone)]
pub struct MePingSample {
    pub dc: i32,
    pub addr: SocketAddr,
    pub connect_ms: Option<f64>,
    pub handshake_ms: Option<f64>,
    pub error: Option<String>,
    pub family: MePingFamily,
}

#[derive(Debug, Clone)]
pub struct MePingReport {
    pub dc: i32,
    pub family: MePingFamily,
    pub samples: Vec<MePingSample>,
}

pub fn format_sample_line(sample: &MePingSample) -> String {
    let sign = if sample.dc >= 0 { "+" } else { "-" };
    let addr = format!("{}:{}", sample.addr.ip(), sample.addr.port());

    match (sample.connect_ms, sample.handshake_ms.as_ref(), sample.error.as_ref()) {
        (Some(conn), Some(hs), None) => format!(
            " {sign} {addr}\tPing: {:.0} ms / RPC: {:.0} ms / OK",
            conn, hs
        ),
        (Some(conn), None, Some(err)) => format!(
            " {sign} {addr}\tPing: {:.0} ms / RPC: FAIL ({err})",
            conn
        ),
        (None, _, Some(err)) => format!(" {sign} {addr}\tPing: FAIL ({err})"),
        (Some(conn), None, None) => format!(" {sign} {addr}\tPing: {:.0} ms / RPC: FAIL", conn),
        _ => format!(" {sign} {addr}\tPing: FAIL"),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    fn sample(base: MePingSample) -> MePingSample {
        base
    }

    #[test]
    fn ok_line_contains_both_timings() {
        let s = sample(MePingSample {
            dc: 4,
            addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8888),
            connect_ms: Some(12.3),
            handshake_ms: Some(34.7),
            error: None,
            family: MePingFamily::V4,
        });
        let line = format_sample_line(&s);
        assert!(line.contains("Ping: 12 ms"));
        assert!(line.contains("RPC: 35 ms"));
        assert!(line.contains("OK"));
    }

    #[test]
    fn error_line_mentions_reason() {
        let s = sample(MePingSample {
            dc: -5,
            addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(5, 6, 7, 8)), 80),
            connect_ms: Some(10.0),
            handshake_ms: None,
            error: Some("handshake timeout".to_string()),
            family: MePingFamily::V4,
        });
        let line = format_sample_line(&s);
        assert!(line.contains("- 5.6.7.8:80"));
        assert!(line.contains("handshake timeout"));
    }
}

pub async fn run_me_ping(pool: &Arc<MePool>, rng: &SecureRandom) -> Vec<MePingReport> {
    let mut reports = Vec::new();

    let v4_map = if pool.decision.ipv4_me {
        pool.proxy_map_v4.read().await.clone()
    } else {
        HashMap::new()
    };
    let v6_map = if pool.decision.ipv6_me {
        pool.proxy_map_v6.read().await.clone()
    } else {
        HashMap::new()
    };

    let mut grouped: Vec<(MePingFamily, i32, Vec<(IpAddr, u16)>)> = Vec::new();
    for (dc, addrs) in v4_map {
        grouped.push((MePingFamily::V4, dc, addrs));
    }
    for (dc, addrs) in v6_map {
        grouped.push((MePingFamily::V6, dc, addrs));
    }

    for (family, dc, addrs) in grouped {
        let mut samples = Vec::new();
        for (ip, port) in addrs {
            let addr = SocketAddr::new(ip, port);
            let mut connect_ms = None;
            let mut handshake_ms = None;
            let mut error = None;

            match pool.connect_tcp(addr).await {
                Ok((stream, conn_rtt)) => {
                    connect_ms = Some(conn_rtt);
                    match pool.handshake_only(stream, addr, rng).await {
                        Ok(hs) => {
                            handshake_ms = Some(hs.handshake_ms);
                            // drop halves to close
                            drop(hs.rd);
                            drop(hs.wr);
                        }
                        Err(e) => {
                            error = Some(short_err(&e));
                        }
                    }
                }
                Err(e) => {
                    error = Some(short_err(&e));
                }
            }

            samples.push(MePingSample {
                dc,
                addr,
                connect_ms,
                handshake_ms,
                error,
                family,
            });
        }

        reports.push(MePingReport {
            dc,
            family,
            samples,
        });
    }

    reports
}

fn short_err(err: &ProxyError) -> String {
    match err {
        ProxyError::ConnectionTimeout { .. } => "connect timeout".to_string(),
        ProxyError::TgHandshakeTimeout => "handshake timeout".to_string(),
        ProxyError::InvalidHandshake(e) => format!("bad handshake: {e}"),
        ProxyError::Crypto(e) => format!("crypto: {e}"),
        ProxyError::Proxy(e) => format!("proxy: {e}"),
        ProxyError::Io(e) => format!("io: {e}"),
        _ => format!("{err}"),
    }
}
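The assertions in the ping tests above rely on `{:.0}` rounding to the nearest millisecond, so a 34.7 ms handshake prints as "35 ms" and a 12.3 ms connect as "12 ms". A tiny sketch of that formatting behaviour in isolation:

fn main() {
    // Rounding via {:.0} is what the tests assert on.
    let (connect_ms, handshake_ms) = (12.3_f64, 34.7_f64);
    let line = format!(" + 1.2.3.4:8888\tPing: {connect_ms:.0} ms / RPC: {handshake_ms:.0} ms / OK");
    assert!(line.contains("Ping: 12 ms") && line.contains("RPC: 35 ms"));
    println!("{line}");
}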
@@ -1,45 +1,73 @@
use std::collections::HashMap;
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, Ordering};

use bytes::BytesMut;
use rand::Rng;
use rand::seq::SliceRandom;

use tokio::sync::{Mutex, RwLock};
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
use std::time::Duration;

use crate::crypto::SecureRandom;
use crate::error::{ProxyError, Result};
use crate::network::probe::NetworkDecision;
use crate::network::IpFamily;
use crate::protocol::constants::*;

use super::ConnRegistry;
use super::registry::{BoundConn, ConnMeta};
use super::codec::RpcWriter;

use super::reader::reader_loop;
use super::MeResponse;

const ME_ACTIVE_PING_SECS: u64 = 25;
const ME_ACTIVE_PING_JITTER_SECS: i64 = 5;

#[derive(Clone)]
pub struct MeWriter {
    pub id: u64,
    pub addr: SocketAddr,
    pub writer: Arc<Mutex<RpcWriter>>,
    pub cancel: CancellationToken,
    pub degraded: Arc<AtomicBool>,
    pub draining: Arc<AtomicBool>,
}

pub struct MePool {
    pub(super) registry: Arc<ConnRegistry>,
    pub(super) writers: Arc<RwLock<Vec<MeWriter>>>,
    pub(super) rr: AtomicU64,
    pub(super) decision: NetworkDecision,
    pub(super) rng: Arc<SecureRandom>,
    pub(super) proxy_tag: Option<Vec<u8>>,
    pub(super) proxy_secret: Arc<RwLock<Vec<u8>>>,
    pub(super) nat_ip_cfg: Option<IpAddr>,
    pub(super) nat_ip_detected: Arc<RwLock<Option<IpAddr>>>,
    pub(super) nat_probe: bool,
    pub(super) nat_stun: Option<String>,
    pub(super) detected_ipv6: Option<Ipv6Addr>,
    pub(super) nat_probe_attempts: std::sync::atomic::AtomicU8,
    pub(super) nat_probe_disabled: std::sync::atomic::AtomicBool,
    pub(super) me_one_retry: u8,
    pub(super) me_one_timeout: Duration,
    pub(super) proxy_map_v4: Arc<RwLock<HashMap<i32, Vec<(IpAddr, u16)>>>>,
    pub(super) proxy_map_v6: Arc<RwLock<HashMap<i32, Vec<(IpAddr, u16)>>>>,
    pub(super) default_dc: AtomicI32,
    pub(super) next_writer_id: AtomicU64,
    pub(super) ping_tracker: Arc<Mutex<HashMap<i64, (std::time::Instant, u64)>>>,
    pub(super) rtt_stats: Arc<Mutex<HashMap<u64, (f64, f64)>>>,
    pub(super) nat_reflection_cache: Arc<Mutex<NatReflectionCache>>,
    pool_size: usize,
}

#[derive(Debug, Default)]
pub struct NatReflectionCache {
    pub v4: Option<(std::time::Instant, std::net::SocketAddr)>,
    pub v6: Option<(std::time::Instant, std::net::SocketAddr)>,
}

impl MePool {
    pub fn new(
        proxy_tag: Option<Vec<u8>>,
@@ -47,18 +75,40 @@ impl MePool {
        nat_ip: Option<IpAddr>,
        nat_probe: bool,
        nat_stun: Option<String>,
        detected_ipv6: Option<Ipv6Addr>,
        me_one_retry: u8,
        me_one_timeout_ms: u64,
        proxy_map_v4: HashMap<i32, Vec<(IpAddr, u16)>>,
        proxy_map_v6: HashMap<i32, Vec<(IpAddr, u16)>>,
        default_dc: Option<i32>,
        decision: NetworkDecision,
        rng: Arc<SecureRandom>,
    ) -> Arc<Self> {
        Arc::new(Self {
            registry: Arc::new(ConnRegistry::new()),
            writers: Arc::new(RwLock::new(Vec::new())),
            rr: AtomicU64::new(0),
            decision,
            rng,
            proxy_tag,
            proxy_secret: Arc::new(RwLock::new(proxy_secret)),
            nat_ip_cfg: nat_ip,
            nat_ip_detected: Arc::new(RwLock::new(None)),
            nat_probe,
            nat_stun,
            detected_ipv6,
            nat_probe_attempts: std::sync::atomic::AtomicU8::new(0),
            nat_probe_disabled: std::sync::atomic::AtomicBool::new(false),
            me_one_retry,
            me_one_timeout: Duration::from_millis(me_one_timeout_ms),
            pool_size: 2,
            proxy_map_v4: Arc::new(RwLock::new(proxy_map_v4)),
            proxy_map_v6: Arc::new(RwLock::new(proxy_map_v6)),
            default_dc: AtomicI32::new(default_dc.unwrap_or(0)),
            next_writer_id: AtomicU64::new(1),
            ping_tracker: Arc::new(Mutex::new(HashMap::new())),
            rtt_stats: Arc::new(Mutex::new(HashMap::new())),
            nat_reflection_cache: Arc::new(Mutex::new(NatReflectionCache::default())),
        })
    }

@@ -75,44 +125,175 @@ impl MePool {
        &self.registry
    }

    fn writers_arc(&self) -> Arc<RwLock<Vec<MeWriter>>> {
        self.writers.clone()
    }

    pub async fn reconcile_connections(self: &Arc<Self>, rng: &SecureRandom) {
        use std::collections::HashSet;
        let writers = self.writers.read().await;
        let current: HashSet<SocketAddr> = writers.iter().map(|w| w.addr).collect();
        drop(writers);

        for family in self.family_order() {
            let map = self.proxy_map_for_family(family).await;
            for (_dc, addrs) in map.iter() {
                let dc_addrs: Vec<SocketAddr> = addrs
                    .iter()
                    .map(|(ip, port)| SocketAddr::new(*ip, *port))
                    .collect();
                if !dc_addrs.iter().any(|a| current.contains(a)) {
                    let mut shuffled = dc_addrs.clone();
                    shuffled.shuffle(&mut rand::rng());
                    for addr in shuffled {
                        if self.connect_one(addr, rng).await.is_ok() {
                            break;
                        }
                    }
                }
            }
            if !self.decision.effective_multipath && !current.is_empty() {
                break;
            }
        }
    }

    pub async fn update_proxy_maps(
        &self,
        new_v4: HashMap<i32, Vec<(IpAddr, u16)>>,
        new_v6: Option<HashMap<i32, Vec<(IpAddr, u16)>>>,
    ) -> bool {
        let mut changed = false;
        {
            let mut guard = self.proxy_map_v4.write().await;
            if !new_v4.is_empty() && *guard != new_v4 {
                *guard = new_v4;
                changed = true;
            }
        }
        if let Some(v6) = new_v6 {
            let mut guard = self.proxy_map_v6.write().await;
            if !v6.is_empty() && *guard != v6 {
                *guard = v6;
            }
        }
        changed
    }

    pub async fn update_secret(&self, new_secret: Vec<u8>) -> bool {
        if new_secret.len() < 32 {
            warn!(len = new_secret.len(), "proxy-secret update ignored (too short)");
            return false;
        }
        let mut guard = self.proxy_secret.write().await;
        if *guard != new_secret {
            *guard = new_secret;
            drop(guard);
            self.reconnect_all().await;
            return true;
        }
        false
    }

    pub async fn reconnect_all(&self) {
        // Graceful: do not drop all at once. New connections will use updated secret.
        // Existing writers remain until health monitor replaces them.
        // No-op here to avoid total outage.
    }

    pub(super) async fn key_selector(&self) -> u32 {
        let secret = self.proxy_secret.read().await;
        if secret.len() >= 4 {
            u32::from_le_bytes([secret[0], secret[1], secret[2], secret[3]])
        } else {
            0
        }
    }

    pub(super) fn family_order(&self) -> Vec<IpFamily> {
        let mut order = Vec::new();
        if self.decision.prefer_ipv6() {
            if self.decision.ipv6_me {
                order.push(IpFamily::V6);
            }
            if self.decision.ipv4_me {
                order.push(IpFamily::V4);
            }
        } else {
            if self.decision.ipv4_me {
                order.push(IpFamily::V4);
            }
            if self.decision.ipv6_me {
                order.push(IpFamily::V6);
            }
        }
        order
    }

    async fn proxy_map_for_family(&self, family: IpFamily) -> HashMap<i32, Vec<(IpAddr, u16)>> {
        match family {
            IpFamily::V4 => self.proxy_map_v4.read().await.clone(),
            IpFamily::V6 => self.proxy_map_v6.read().await.clone(),
        }
    }

    pub async fn init(self: &Arc<Self>, pool_size: usize, rng: &Arc<SecureRandom>) -> Result<()> {
        let family_order = self.family_order();
        let ks = self.key_selector().await;
        info!(
            me_servers = self.proxy_map_v4.read().await.len(),
            pool_size,
            key_selector = format_args!("0x{ks:08x}"),
            secret_len = self.proxy_secret.read().await.len(),
            "Initializing ME pool"
        );

        for family in family_order {
            let map = self.proxy_map_for_family(family).await;
            let dc_addrs: Vec<(i32, Vec<(IpAddr, u16)>)> = map
                .iter()
                .map(|(dc, addrs)| (*dc, addrs.clone()))
                .collect();

            // Ensure at least one connection per DC; run DCs in parallel.
            let mut join = tokio::task::JoinSet::new();
            let mut dc_failures = 0usize;
            for (dc, addrs) in dc_addrs.iter().cloned() {
                if addrs.is_empty() {
                    continue;
                }
                let pool = Arc::clone(self);
                let rng_clone = Arc::clone(rng);
                join.spawn(async move {
                    pool.connect_primary_for_dc(dc, addrs, rng_clone).await
                });
            }
            while let Some(res) = join.join_next().await {
                if let Ok(false) = res {
                    dc_failures += 1;
                }
            }
            if dc_failures > 2 {
                return Err(ProxyError::Proxy("Too many ME DC init failures, falling back to direct".into()));
            }

            // Additional connections up to pool_size total (round-robin across DCs)
            for (dc, addrs) in dc_addrs.iter() {
                for (ip, port) in addrs {
                    if self.connection_count() >= pool_size {
                        break;
                    }
                    let addr = SocketAddr::new(*ip, *port);
                    if let Err(e) = self.connect_one(addr, rng.as_ref()).await {
                        debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed");
                    }
                }
                if self.connection_count() >= pool_size {
                    break;
                }
            }

            if !self.decision.effective_multipath && self.connection_count() > 0 {
                break;
            }
        }
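`key_selector()` above identifies which proxy-secret generation the client holds by sending the secret's first four bytes as a little-endian u32 during the nonce exchange. A standalone sketch of that derivation with a made-up secret:

/// First four bytes of the proxy-secret, little-endian; 0 if the secret is too short.
fn key_selector(secret: &[u8]) -> u32 {
    if secret.len() >= 4 {
        u32::from_le_bytes([secret[0], secret[1], secret[2], secret[3]])
    } else {
        0
    }
}

fn main() {
    let secret = [0xde, 0xad, 0xbe, 0xef, 0x01, 0x02]; // illustrative bytes only
    assert_eq!(key_selector(&secret), 0xefbe_adde);
    println!("key_selector = 0x{:08x}", key_selector(&secret));
}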
@@ -123,356 +304,100 @@ impl MePool {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn connect_one(
|
pub(crate) async fn connect_one(self: &Arc<Self>, addr: SocketAddr, rng: &SecureRandom) -> Result<()> {
|
||||||
self: &Arc<Self>,
|
let secret_len = self.proxy_secret.read().await.len();
|
||||||
addr: SocketAddr,
|
if secret_len < 32 {
|
||||||
rng: &SecureRandom,
|
return Err(ProxyError::Proxy("proxy-secret too short for ME auth".into()));
|
||||||
) -> Result<()> {
|
|
||||||
let secret = &self.proxy_secret;
|
|
||||||
if secret.len() < 32 {
|
|
||||||
return Err(ProxyError::Proxy(
|
|
||||||
"proxy-secret too short for ME auth".into(),
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let stream = timeout(
|
let (stream, _connect_ms) = self.connect_tcp(addr).await?;
|
||||||
Duration::from_secs(ME_CONNECT_TIMEOUT_SECS),
|
let hs = self.handshake_only(stream, addr, rng).await?;
|
||||||
TcpStream::connect(addr),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map_err(|_| ProxyError::ConnectionTimeout {
|
|
||||||
addr: addr.to_string(),
|
|
||||||
})?
|
|
||||||
.map_err(ProxyError::Io)?;
|
|
||||||
stream.set_nodelay(true).ok();
|
|
||||||
|
|
||||||
let local_addr = stream.local_addr().map_err(ProxyError::Io)?;
|
|
||||||
let peer_addr = stream.peer_addr().map_err(ProxyError::Io)?;
|
|
||||||
let _ = self.maybe_detect_nat_ip(local_addr.ip()).await;
|
|
||||||
let reflected = if self.nat_probe {
|
|
||||||
self.maybe_reflect_public_addr().await
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
let local_addr_nat = self.translate_our_addr_with_reflection(local_addr, reflected);
|
|
||||||
let peer_addr_nat =
|
|
||||||
SocketAddr::new(self.translate_ip_for_nat(peer_addr.ip()), peer_addr.port());
|
|
||||||
let (mut rd, mut wr) = tokio::io::split(stream);
|
|
||||||
|
|
||||||
let my_nonce: [u8; 16] = rng.bytes(16).try_into().unwrap();
|
|
||||||
let crypto_ts = std::time::SystemTime::now()
|
|
||||||
.duration_since(std::time::UNIX_EPOCH)
|
|
||||||
.unwrap_or_default()
|
|
||||||
.as_secs() as u32;
|
|
||||||
|
|
||||||
let ks = self.key_selector();
|
|
||||||
let nonce_payload = build_nonce_payload(ks, crypto_ts, &my_nonce);
|
|
||||||
let nonce_frame = build_rpc_frame(-2, &nonce_payload);
|
|
||||||
let dump = hex_dump(&nonce_frame[..nonce_frame.len().min(44)]);
|
|
||||||
info!(
|
|
||||||
key_selector = format_args!("0x{ks:08x}"),
|
|
||||||
crypto_ts,
|
|
||||||
frame_len = nonce_frame.len(),
|
|
||||||
nonce_frame_hex = %dump,
|
|
||||||
"Sending ME nonce frame"
|
|
||||||
);
|
|
||||||
wr.write_all(&nonce_frame).await.map_err(ProxyError::Io)?;
|
|
||||||
wr.flush().await.map_err(ProxyError::Io)?;
|
|
||||||
|
|
||||||
let (srv_seq, srv_nonce_payload) = timeout(
|
|
||||||
Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS),
|
|
||||||
read_rpc_frame_plaintext(&mut rd),
|
|
||||||
)
|
|
||||||
.await
|
|
||||||
.map_err(|_| ProxyError::TgHandshakeTimeout)??;
|
|
||||||
|
|
||||||
if srv_seq != -2 {
|
|
||||||
return Err(ProxyError::InvalidHandshake(format!(
|
|
||||||
"Expected seq=-2, got {srv_seq}"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let (srv_key_select, schema, srv_ts, srv_nonce) = parse_nonce_payload(&srv_nonce_payload)?;
|
|
||||||
if schema != RPC_CRYPTO_AES_U32 {
|
|
||||||
warn!(schema = format_args!("0x{schema:08x}"), "Unsupported ME crypto schema");
|
|
||||||
return Err(ProxyError::InvalidHandshake(format!(
|
|
||||||
"Unsupported crypto schema: 0x{schema:x}"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if srv_key_select != ks {
|
|
||||||
return Err(ProxyError::InvalidHandshake(format!(
|
|
||||||
"Server key_select 0x{srv_key_select:08x} != client 0x{ks:08x}"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let skew = crypto_ts.abs_diff(srv_ts);
|
|
||||||
if skew > 30 {
|
|
||||||
return Err(ProxyError::InvalidHandshake(format!(
|
|
||||||
"nonce crypto_ts skew too large: client={crypto_ts}, server={srv_ts}, skew={skew}s"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
info!(
|
|
||||||
%local_addr,
|
|
||||||
%local_addr_nat,
|
|
||||||
reflected_ip = reflected.map(|r| r.ip()).as_ref().map(ToString::to_string),
|
|
||||||
%peer_addr,
|
|
||||||
%peer_addr_nat,
|
|
||||||
key_selector = format_args!("0x{ks:08x}"),
|
|
||||||
crypto_schema = format_args!("0x{schema:08x}"),
|
|
||||||
skew_secs = skew,
|
|
||||||
"ME key derivation parameters"
|
|
||||||
);
|
|
||||||
|
|
||||||
let ts_bytes = crypto_ts.to_le_bytes();
|
|
||||||
let server_port_bytes = peer_addr_nat.port().to_le_bytes();
|
|
||||||
let client_port_bytes = local_addr_nat.port().to_le_bytes();
|
|
||||||
|
|
||||||
let server_ip = extract_ip_material(peer_addr_nat);
|
|
||||||
let client_ip = extract_ip_material(local_addr_nat);
|
|
||||||
|
|
||||||
let (srv_ip_opt, clt_ip_opt, clt_v6_opt, srv_v6_opt, hs_our_ip, hs_peer_ip) =
|
|
||||||
match (server_ip, client_ip) {
|
|
||||||
(IpMaterial::V4(srv), IpMaterial::V4(clt)) => {
|
|
||||||
(Some(srv), Some(clt), None, None, clt, srv)
|
|
||||||
}
|
|
||||||
(IpMaterial::V6(srv), IpMaterial::V6(clt)) => {
|
|
||||||
let zero = [0u8; 4];
|
|
||||||
(None, None, Some(clt), Some(srv), zero, zero)
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
return Err(ProxyError::InvalidHandshake(
|
|
||||||
"mixed IPv4/IPv6 endpoints are not supported for ME key derivation"
|
|
||||||
.to_string(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let diag_level: u8 = std::env::var("ME_DIAG")
|
|
||||||
.ok()
|
|
||||||
.and_then(|v| v.parse().ok())
|
|
||||||
.unwrap_or(0);
|
|
||||||
|
|
||||||
let prekey_client = build_middleproxy_prekey(
|
|
||||||
&srv_nonce,
|
|
||||||
&my_nonce,
|
|
||||||
&ts_bytes,
|
|
||||||
srv_ip_opt.as_ref().map(|x| &x[..]),
|
|
||||||
&client_port_bytes,
|
|
||||||
b"CLIENT",
|
|
||||||
clt_ip_opt.as_ref().map(|x| &x[..]),
|
|
||||||
&server_port_bytes,
|
|
||||||
secret,
|
|
||||||
clt_v6_opt.as_ref(),
|
|
||||||
srv_v6_opt.as_ref(),
|
|
||||||
);
|
|
||||||
let prekey_server = build_middleproxy_prekey(
|
|
||||||
&srv_nonce,
|
|
||||||
&my_nonce,
|
|
||||||
&ts_bytes,
|
|
||||||
srv_ip_opt.as_ref().map(|x| &x[..]),
|
|
||||||
&client_port_bytes,
|
|
||||||
b"SERVER",
|
|
||||||
clt_ip_opt.as_ref().map(|x| &x[..]),
|
|
||||||
&server_port_bytes,
|
|
||||||
secret,
|
|
||||||
clt_v6_opt.as_ref(),
|
|
||||||
srv_v6_opt.as_ref(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let (wk, wi) = derive_middleproxy_keys(
|
|
||||||
&srv_nonce,
|
|
||||||
&my_nonce,
|
|
||||||
&ts_bytes,
|
|
||||||
srv_ip_opt.as_ref().map(|x| &x[..]),
|
|
||||||
&client_port_bytes,
|
|
||||||
b"CLIENT",
|
|
||||||
clt_ip_opt.as_ref().map(|x| &x[..]),
|
|
||||||
&server_port_bytes,
|
|
||||||
secret,
|
|
||||||
clt_v6_opt.as_ref(),
|
|
||||||
srv_v6_opt.as_ref(),
|
|
||||||
);
|
|
||||||
let (rk, ri) = derive_middleproxy_keys(
|
|
||||||
&srv_nonce,
|
|
||||||
&my_nonce,
|
|
||||||
&ts_bytes,
|
|
||||||
srv_ip_opt.as_ref().map(|x| &x[..]),
|
|
||||||
&client_port_bytes,
|
|
||||||
b"SERVER",
|
|
||||||
clt_ip_opt.as_ref().map(|x| &x[..]),
|
|
||||||
&server_port_bytes,
|
|
||||||
secret,
|
|
||||||
clt_v6_opt.as_ref(),
|
|
||||||
srv_v6_opt.as_ref(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let hs_payload =
|
|
||||||
build_handshake_payload(hs_our_ip, local_addr.port(), hs_peer_ip, peer_addr.port());
|
|
||||||
let hs_frame = build_rpc_frame(-1, &hs_payload);
|
|
||||||
if diag_level >= 1 {
|
|
||||||
info!(
|
|
||||||
write_key = %hex_dump(&wk),
|
|
||||||
write_iv = %hex_dump(&wi),
|
|
||||||
read_key = %hex_dump(&rk),
|
|
||||||
read_iv = %hex_dump(&ri),
|
|
||||||
srv_ip = %srv_ip_opt.map(|ip| hex_dump(&ip)).unwrap_or_default(),
|
|
||||||
clt_ip = %clt_ip_opt.map(|ip| hex_dump(&ip)).unwrap_or_default(),
|
|
||||||
srv_port = %hex_dump(&server_port_bytes),
|
|
||||||
clt_port = %hex_dump(&client_port_bytes),
|
|
||||||
crypto_ts = %hex_dump(&ts_bytes),
|
|
||||||
nonce_srv = %hex_dump(&srv_nonce),
|
|
||||||
nonce_clt = %hex_dump(&my_nonce),
|
|
||||||
prekey_sha256_client = %hex_dump(&sha256(&prekey_client)),
|
|
||||||
prekey_sha256_server = %hex_dump(&sha256(&prekey_server)),
|
|
||||||
hs_plain = %hex_dump(&hs_frame),
|
|
||||||
proxy_secret_sha256 = %hex_dump(&sha256(secret)),
|
|
||||||
"ME diag: derived keys and handshake plaintext"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
if diag_level >= 2 {
|
|
||||||
info!(
|
|
||||||
prekey_client = %hex_dump(&prekey_client),
|
|
||||||
prekey_server = %hex_dump(&prekey_server),
|
|
||||||
"ME diag: full prekey buffers"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let (encrypted_hs, write_iv) = cbc_encrypt_padded(&wk, &wi, &hs_frame)?;
|
|
||||||
if diag_level >= 1 {
|
|
||||||
info!(
|
|
||||||
hs_cipher = %hex_dump(&encrypted_hs),
|
|
||||||
"ME diag: handshake ciphertext"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
wr.write_all(&encrypted_hs).await.map_err(ProxyError::Io)?;
|
|
||||||
wr.flush().await.map_err(ProxyError::Io)?;
|
|
||||||
|
|
||||||
let deadline = Instant::now() + Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS);
|
|
||||||
let mut enc_buf = BytesMut::with_capacity(256);
|
|
||||||
let mut dec_buf = BytesMut::with_capacity(256);
|
|
||||||
let mut read_iv = ri;
|
|
||||||
let mut handshake_ok = false;
|
|
||||||
|
|
||||||
while Instant::now() < deadline && !handshake_ok {
|
|
||||||
let remaining = deadline - Instant::now();
|
|
||||||
let mut tmp = [0u8; 256];
|
|
||||||
let n = match timeout(remaining, rd.read(&mut tmp)).await {
|
|
||||||
Ok(Ok(0)) => {
|
|
||||||
return Err(ProxyError::Io(std::io::Error::new(
|
|
||||||
std::io::ErrorKind::UnexpectedEof,
|
|
||||||
"ME closed during handshake",
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
Ok(Ok(n)) => n,
|
|
||||||
Ok(Err(e)) => return Err(ProxyError::Io(e)),
|
|
||||||
Err(_) => return Err(ProxyError::TgHandshakeTimeout),
|
|
||||||
};
|
|
||||||
|
|
||||||
enc_buf.extend_from_slice(&tmp[..n]);
|
|
||||||
|
|
||||||
let blocks = enc_buf.len() / 16 * 16;
|
|
||||||
if blocks > 0 {
|
|
||||||
let mut chunk = vec![0u8; blocks];
|
|
||||||
chunk.copy_from_slice(&enc_buf[..blocks]);
|
|
||||||
read_iv = cbc_decrypt_inplace(&rk, &read_iv, &mut chunk)?;
|
|
||||||
dec_buf.extend_from_slice(&chunk);
|
|
||||||
let _ = enc_buf.split_to(blocks);
|
|
||||||
}
|
|
||||||
|
|
||||||
while dec_buf.len() >= 4 {
|
|
||||||
let fl = u32::from_le_bytes(dec_buf[0..4].try_into().unwrap()) as usize;
|
|
||||||
|
|
||||||
if fl == 4 {
|
|
||||||
let _ = dec_buf.split_to(4);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if !(12..=(1 << 24)).contains(&fl) {
|
|
||||||
return Err(ProxyError::InvalidHandshake(format!(
|
|
||||||
"Bad HS response frame len: {fl}"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
if dec_buf.len() < fl {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
let frame = dec_buf.split_to(fl);
|
|
||||||
let pe = fl - 4;
|
|
||||||
let ec = u32::from_le_bytes(frame[pe..pe + 4].try_into().unwrap());
|
|
||||||
let ac = crate::crypto::crc32(&frame[..pe]);
|
|
||||||
if ec != ac {
|
|
||||||
return Err(ProxyError::InvalidHandshake(format!(
|
|
||||||
"HS CRC mismatch: 0x{ec:08x} vs 0x{ac:08x}"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let hs_type = u32::from_le_bytes(frame[8..12].try_into().unwrap());
|
|
||||||
if hs_type == RPC_HANDSHAKE_ERROR_U32 {
|
|
||||||
let err_code = if frame.len() >= 16 {
|
|
||||||
i32::from_le_bytes(frame[12..16].try_into().unwrap())
|
|
||||||
} else {
|
|
||||||
-1
|
|
||||||
};
|
|
||||||
return Err(ProxyError::InvalidHandshake(format!(
|
|
||||||
"ME rejected handshake (error={err_code})"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
if hs_type != RPC_HANDSHAKE_U32 {
|
|
||||||
return Err(ProxyError::InvalidHandshake(format!(
|
|
||||||
"Expected HANDSHAKE 0x{RPC_HANDSHAKE_U32:08x}, got 0x{hs_type:08x}"
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
handshake_ok = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !handshake_ok {
|
|
||||||
return Err(ProxyError::TgHandshakeTimeout);
|
|
||||||
}
|
|
||||||
|
|
||||||
info!(%addr, "RPC handshake OK");
|
|
||||||
|
|
||||||
|
let writer_id = self.next_writer_id.fetch_add(1, Ordering::Relaxed);
let cancel = CancellationToken::new();
let degraded = Arc::new(AtomicBool::new(false));
let draining = Arc::new(AtomicBool::new(false));
let rpc_w = Arc::new(Mutex::new(RpcWriter {
    writer: hs.wr,
    key: hs.write_key,
    iv: hs.write_iv,
    seq_no: 0,
}));
let writer = MeWriter {
    id: writer_id,
    addr,
    writer: rpc_w.clone(),
    cancel: cancel.clone(),
    degraded: degraded.clone(),
    draining: draining.clone(),
};
self.writers.write().await.push(writer.clone());

let reg = self.registry.clone();
let writers_arc = self.writers_arc();
let ping_tracker = self.ping_tracker.clone();
let rtt_stats = self.rtt_stats.clone();
let pool = Arc::downgrade(self);
let cancel_ping = cancel.clone();
let rpc_w_ping = rpc_w.clone();
let ping_tracker_ping = ping_tracker.clone();

tokio::spawn(async move {
    let cancel_reader = cancel.clone();
    let res = reader_loop(
        hs.rd,
        hs.read_key,
        hs.read_iv,
        reg.clone(),
        BytesMut::new(),
        BytesMut::new(),
        rpc_w.clone(),
        ping_tracker.clone(),
        rtt_stats.clone(),
        writer_id,
        degraded.clone(),
        cancel_reader.clone(),
    )
    .await;
    if let Some(pool) = pool.upgrade() {
        pool.remove_writer_and_close_clients(writer_id).await;
    }
    if let Err(e) = res {
        warn!(error = %e, "ME reader ended");
    }
    let mut ws = writers_arc.write().await;
    ws.retain(|w| w.id != writer_id);
    info!(remaining = ws.len(), "Dead ME writer removed from pool");
});

let pool_ping = Arc::downgrade(self);
tokio::spawn(async move {
    let mut ping_id: i64 = rand::random::<i64>();
    loop {
        let jitter = rand::rng()
            .random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS);
        let wait = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64;
        tokio::select! {
            _ = cancel_ping.cancelled() => {
                break;
            }
            _ = tokio::time::sleep(Duration::from_secs(wait)) => {}
        }
        let sent_id = ping_id;
        let mut p = Vec::with_capacity(12);
        p.extend_from_slice(&RPC_PING_U32.to_le_bytes());
        p.extend_from_slice(&sent_id.to_le_bytes());
        {
            let mut tracker = ping_tracker_ping.lock().await;
            tracker.insert(sent_id, (std::time::Instant::now(), writer_id));
        }
        ping_id = ping_id.wrapping_add(1);
        if let Err(e) = rpc_w_ping.lock().await.send_and_flush(&p).await {
            debug!(error = %e, "Active ME ping failed, removing dead writer");
            cancel_ping.cancel();
            if let Some(pool) = pool_ping.upgrade() {
                pool.remove_writer_and_close_clients(writer_id).await;
            }
            break;
        }
    }
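// Sketch of the ping scheduling above. ME_ACTIVE_PING_SECS and ME_ACTIVE_PING_JITTER_SECS are
// defined elsewhere in the crate; their values are assumed here to be on the order of 60 s and
// ±10 s. Each iteration sleeps for a randomized interval, clamped to at least 5 seconds, so
// pings from many writers do not synchronize.
fn next_ping_delay_secs(base: i64, jitter_bound: i64) -> u64 {
    use rand::Rng;
    let jitter = rand::rng().random_range(-jitter_bound..=jitter_bound);
    (base + jitter).max(5) as u64
}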
@@ -481,6 +406,73 @@ impl MePool {
        Ok(())
    }

    async fn connect_primary_for_dc(
        self: Arc<Self>,
        dc: i32,
        mut addrs: Vec<(IpAddr, u16)>,
        rng: Arc<SecureRandom>,
    ) -> bool {
        if addrs.is_empty() {
            return false;
        }
        addrs.shuffle(&mut rand::rng());
        for (ip, port) in addrs {
            let addr = SocketAddr::new(ip, port);
            match self.connect_one(addr, rng.as_ref()).await {
                Ok(()) => {
                    info!(%addr, dc = %dc, "ME connected");
                    return true;
                }
                Err(e) => warn!(%addr, dc = %dc, error = %e, "ME connect failed, trying next"),
            }
        }
        warn!(dc = %dc, "All ME servers for DC failed at init");
        false
    }

    pub(crate) async fn remove_writer_and_close_clients(&self, writer_id: u64) {
        let conns = self.remove_writer_only(writer_id).await;
        for bound in conns {
            let _ = self.registry.route(bound.conn_id, super::MeResponse::Close).await;
            let _ = self.registry.unregister(bound.conn_id).await;
        }
    }

    async fn remove_writer_only(&self, writer_id: u64) -> Vec<BoundConn> {
        {
            let mut ws = self.writers.write().await;
            if let Some(pos) = ws.iter().position(|w| w.id == writer_id) {
                let w = ws.remove(pos);
                w.cancel.cancel();
            }
        }
        self.registry.writer_lost(writer_id).await
    }

    pub(crate) async fn mark_writer_draining(self: &Arc<Self>, writer_id: u64) {
        {
            let mut ws = self.writers.write().await;
            if let Some(w) = ws.iter_mut().find(|w| w.id == writer_id) {
                w.draining.store(true, Ordering::Relaxed);
            }
        }

        let pool = Arc::downgrade(self);
        tokio::spawn(async move {
            loop {
                if let Some(p) = pool.upgrade() {
                    if p.registry.is_writer_empty(writer_id).await {
                        let _ = p.remove_writer_only(writer_id).await;
                        break;
                    }
                    tokio::time::sleep(Duration::from_secs(1)).await;
                } else {
                    break;
                }
            }
        });
    }
}
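// Sketch of the lifetime pattern used by mark_writer_draining above: the background drain task
// holds only a Weak<MePool>, so it cannot keep the pool alive; once the pool is dropped,
// upgrade() returns None and the task exits. Minimal shape under that assumption, with the
// concrete "is it drained yet" check replaced by a hypothetical closure:
use std::sync::{Arc, Weak};
use std::time::Duration;

fn spawn_drain_watcher<T, F>(owner: &Arc<T>, mut done: F)
where
    T: Send + Sync + 'static,
    F: FnMut(&T) -> bool + Send + 'static,
{
    let weak: Weak<T> = Arc::downgrade(owner);
    tokio::spawn(async move {
        loop {
            let Some(strong) = weak.upgrade() else {
                break; // pool dropped: stop watching
            };
            if done(&strong) {
                break; // drained: stop watching
            }
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
    });
}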
fn hex_dump(data: &[u8]) -> String {

@@ -1,16 +1,28 @@
use std::net::{IpAddr, Ipv4Addr};
use std::time::Duration;

use tracing::{info, warn, debug};

use crate::error::{ProxyError, Result};
use crate::network::probe::is_bogon;
use crate::network::stun::{stun_probe_dual, IpFamily, StunProbeResult};

use super::MePool;
use std::time::Instant;

pub async fn stun_probe(stun_addr: Option<String>) -> Result<crate::network::stun::DualStunResult> {
    let stun_addr = stun_addr.unwrap_or_else(|| "stun.l.google.com:19302".to_string());
    stun_probe_dual(&stun_addr).await
}

pub async fn detect_public_ip() -> Option<IpAddr> {
    fetch_public_ipv4_with_retry().await.ok().flatten().map(IpAddr::V4)
}

impl MePool {
    pub(super) fn translate_ip_for_nat(&self, ip: IpAddr) -> IpAddr {
        let nat_ip = self
            .nat_ip_cfg
            .or_else(|| self.nat_ip_detected.try_read().ok().and_then(|g| (*g).clone()));

        let Some(nat_ip) = nat_ip else {
            return ip;
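// Usage sketch for the two helpers above (hypothetical call site; DualStunResult comes from
// crate::network::stun and is assumed to carry independent v4/v6 probe slots with a
// reflected_addr field, as used later in this diff):
async fn log_reflected_addrs() -> Result<()> {
    let probe = stun_probe(None).await?; // defaults to stun.l.google.com:19302
    if let Some(v4) = probe.v4 {
        info!(reflected = %v4.reflected_addr, "IPv4 reflected address");
    }
    if let Some(ip) = detect_public_ip().await {
        info!(public_ip = %ip, "HTTP-detected public IPv4");
    }
    Ok(())
}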
@@ -18,7 +30,7 @@ impl MePool {

        match (ip, nat_ip) {
            (IpAddr::V4(src), IpAddr::V4(dst))
                if is_bogon(IpAddr::V4(src))
                    || src.is_loopback()
                    || src.is_unspecified() =>
            {
@@ -38,7 +50,7 @@ impl MePool {
    ) -> std::net::SocketAddr {
        let ip = if let Some(r) = reflected {
            // Use reflected IP (not port) only when local address is non-public.
            if is_bogon(addr.ip()) || addr.ip().is_loopback() || addr.ip().is_unspecified() {
                r.ip()
            } else {
                self.translate_ip_for_nat(addr.ip())
@@ -56,17 +68,20 @@ impl MePool {
            return self.nat_ip_cfg;
        }

        if !(is_bogon(local_ip) || local_ip.is_loopback() || local_ip.is_unspecified()) {
            return None;
        }

        if let Some(ip) = self.nat_ip_detected.read().await.clone() {
            return Some(ip);
        }

        match fetch_public_ipv4_with_retry().await {
            Ok(Some(ip)) => {
                {
                    let mut guard = self.nat_ip_detected.write().await;
                    *guard = Some(IpAddr::V4(ip));
                }
                info!(public_ip = %ip, "Auto-detected public IP for NAT translation");
                Some(IpAddr::V4(ip))
            }
@@ -78,28 +93,97 @@ impl MePool {
        }
    }

    pub(super) async fn maybe_reflect_public_addr(
        &self,
        family: IpFamily,
    ) -> Option<std::net::SocketAddr> {
        const STUN_CACHE_TTL: Duration = Duration::from_secs(600);
        // If STUN probing was disabled after attempts, reuse cached (even stale) or skip.
        if self.nat_probe_disabled.load(std::sync::atomic::Ordering::Relaxed) {
            if let Ok(cache) = self.nat_reflection_cache.try_lock() {
                let slot = match family {
                    IpFamily::V4 => cache.v4,
                    IpFamily::V6 => cache.v6,
                };
                return slot.map(|(_, addr)| addr);
            }
            return None;
        }

        if let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
            let slot = match family {
                IpFamily::V4 => &mut cache.v4,
                IpFamily::V6 => &mut cache.v6,
            };
            if let Some((ts, addr)) = slot {
                if ts.elapsed() < STUN_CACHE_TTL {
                    return Some(*addr);
                }
            }
        }

        let attempt = self.nat_probe_attempts.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        if attempt >= 2 {
            self.nat_probe_disabled.store(true, std::sync::atomic::Ordering::Relaxed);
            return None;
        }

        let stun_addr = self
            .nat_stun
            .clone()
            .unwrap_or_else(|| "stun.l.google.com:19302".to_string());
        match stun_probe_dual(&stun_addr).await {
            Ok(res) => {
                let picked: Option<StunProbeResult> = match family {
                    IpFamily::V4 => res.v4,
                    IpFamily::V6 => res.v6,
                };
                if let Some(result) = picked {
                    info!(local = %result.local_addr, reflected = %result.reflected_addr, family = ?family, "NAT probe: reflected address");
                    if let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
                        let slot = match family {
                            IpFamily::V4 => &mut cache.v4,
                            IpFamily::V6 => &mut cache.v6,
                        };
                        *slot = Some((Instant::now(), result.reflected_addr));
                    }
                    Some(result.reflected_addr)
                } else {
                    None
                }
            }
            Err(e) => {
                let attempts = attempt + 1;
                if attempts <= 2 {
                    warn!(error = %e, attempt = attempts, "NAT probe failed");
                } else {
                    debug!(error = %e, attempt = attempts, "NAT probe suppressed after max attempts");
                }
                if attempts >= 2 {
                    self.nat_probe_disabled.store(true, std::sync::atomic::Ordering::Relaxed);
                }
                None
            }
        }
    }
}
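// Sketch of the cache policy implemented above, under the assumption that each address-family
// slot stores (Instant, SocketAddr) and entries older than STUN_CACHE_TTL (600 s) are re-probed:
use std::net::SocketAddr;
use std::time::{Duration, Instant};

fn cached_reflection(slot: Option<(Instant, SocketAddr)>, ttl: Duration) -> Option<SocketAddr> {
    slot.and_then(|(stamp, addr)| (stamp.elapsed() < ttl).then_some(addr))
}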
async fn fetch_public_ipv4_with_retry() -> Result<Option<Ipv4Addr>> {
    let providers = [
        "https://checkip.amazonaws.com",
        "http://v4.ident.me",
        "http://ipv4.icanhazip.com",
    ];
    for url in providers {
        if let Ok(Some(ip)) = fetch_public_ipv4_once(url).await {
            return Ok(Some(ip));
        }
    }
    Ok(None)
}

async fn fetch_public_ipv4_once(url: &str) -> Result<Option<Ipv4Addr>> {
    let res = reqwest::get(url).await.map_err(|e| {
        ProxyError::Proxy(format!("public IP detection request failed: {e}"))
    })?;

@@ -110,91 +194,3 @@ async fn fetch_public_ipv4() -> Result<Option<Ipv4Addr>> {
    let ip = text.trim().parse().ok();
    Ok(ip)
}
|
|
||||||
async fn fetch_stun_binding(stun_addr: &str) -> Result<Option<std::net::SocketAddr>> {
|
|
||||||
use rand::RngCore;
|
|
||||||
use tokio::net::UdpSocket;
|
|
||||||
|
|
||||||
let socket = UdpSocket::bind("0.0.0.0:0")
|
|
||||||
.await
|
|
||||||
.map_err(|e| ProxyError::Proxy(format!("STUN bind failed: {e}")))?;
|
|
||||||
socket
|
|
||||||
.connect(stun_addr)
|
|
||||||
.await
|
|
||||||
.map_err(|e| ProxyError::Proxy(format!("STUN connect failed: {e}")))?;
|
|
||||||
|
|
||||||
// Build minimal Binding Request.
|
|
||||||
let mut req = vec![0u8; 20];
|
|
||||||
req[0..2].copy_from_slice(&0x0001u16.to_be_bytes()); // Binding Request
|
|
||||||
req[2..4].copy_from_slice(&0u16.to_be_bytes()); // length
|
|
||||||
req[4..8].copy_from_slice(&0x2112A442u32.to_be_bytes()); // magic cookie
|
|
||||||
rand::thread_rng().fill_bytes(&mut req[8..20]);
|
|
||||||
|
|
||||||
socket
|
|
||||||
.send(&req)
|
|
||||||
.await
|
|
||||||
.map_err(|e| ProxyError::Proxy(format!("STUN send failed: {e}")))?;
|
|
||||||
|
|
||||||
let mut buf = [0u8; 128];
|
|
||||||
let n = socket
|
|
||||||
.recv(&mut buf)
|
|
||||||
.await
|
|
||||||
.map_err(|e| ProxyError::Proxy(format!("STUN recv failed: {e}")))?;
|
|
||||||
if n < 20 {
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse attributes.
|
|
||||||
let mut idx = 20;
|
|
||||||
while idx + 4 <= n {
|
|
||||||
let atype = u16::from_be_bytes(buf[idx..idx + 2].try_into().unwrap());
|
|
||||||
let alen = u16::from_be_bytes(buf[idx + 2..idx + 4].try_into().unwrap()) as usize;
|
|
||||||
idx += 4;
|
|
||||||
if idx + alen > n {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
match atype {
|
|
||||||
0x0020 /* XOR-MAPPED-ADDRESS */ | 0x0001 /* MAPPED-ADDRESS */ => {
|
|
||||||
if alen < 8 {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
let family = buf[idx + 1];
|
|
||||||
if family != 0x01 {
|
|
||||||
// only IPv4 supported here
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
let port_bytes = [buf[idx + 2], buf[idx + 3]];
|
|
||||||
let ip_bytes = [buf[idx + 4], buf[idx + 5], buf[idx + 6], buf[idx + 7]];
|
|
||||||
|
|
||||||
let (port, ip) = if atype == 0x0020 {
|
|
||||||
let magic = 0x2112A442u32.to_be_bytes();
|
|
||||||
let port = u16::from_be_bytes(port_bytes) ^ ((magic[0] as u16) << 8 | magic[1] as u16);
|
|
||||||
let ip = [
|
|
||||||
ip_bytes[0] ^ magic[0],
|
|
||||||
ip_bytes[1] ^ magic[1],
|
|
||||||
ip_bytes[2] ^ magic[2],
|
|
||||||
ip_bytes[3] ^ magic[3],
|
|
||||||
];
|
|
||||||
(port, ip)
|
|
||||||
} else {
|
|
||||||
(u16::from_be_bytes(port_bytes), ip_bytes)
|
|
||||||
};
|
|
||||||
return Ok(Some(std::net::SocketAddr::new(
|
|
||||||
IpAddr::V4(Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3])),
|
|
||||||
port,
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
idx += (alen + 3) & !3; // 4-byte alignment
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_privateish(ip: IpAddr) -> bool {
|
|
||||||
match ip {
|
|
||||||
IpAddr::V4(v4) => v4.is_private() || v4.is_link_local(),
|
|
||||||
IpAddr::V6(v6) => v6.is_unique_local(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
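// Worked example for the XOR-MAPPED-ADDRESS decoding in the (now removed) STUN parser above:
// RFC 5389 XORs the mapped port with the top 16 bits of the magic cookie 0x2112A442 and each
// IPv4 octet with the corresponding cookie byte. The helper below is illustrative only.
fn unxor_mapped_v4(port_xored: u16, ip_xored: [u8; 4]) -> (u16, [u8; 4]) {
    const MAGIC: [u8; 4] = 0x2112A442u32.to_be_bytes();
    let port = port_xored ^ u16::from_be_bytes([MAGIC[0], MAGIC[1]]); // ^ 0x2112
    let ip = [
        ip_xored[0] ^ MAGIC[0],
        ip_xored[1] ^ MAGIC[1],
        ip_xored[2] ^ MAGIC[2],
        ip_xored[3] ^ MAGIC[3],
    ];
    (port, ip)
}
// Example: a wire port value of 0x3326 decodes to 0x3326 ^ 0x2112 = 0x1234 (4660).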
@@ -1,9 +1,13 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;

use bytes::{Bytes, BytesMut};
use tokio::io::AsyncReadExt;
use tokio::net::TcpStream;
use tokio::sync::Mutex;
use tokio_util::sync::CancellationToken;
use tracing::{debug, trace, warn};

use crate::crypto::{AesCbc, crc32};
@@ -21,12 +25,21 @@ pub(crate) async fn reader_loop(
    enc_leftover: BytesMut,
    mut dec: BytesMut,
    writer: Arc<Mutex<RpcWriter>>,
    ping_tracker: Arc<Mutex<HashMap<i64, (Instant, u64)>>>,
    rtt_stats: Arc<Mutex<HashMap<u64, (f64, f64)>>>,
    _writer_id: u64,
    degraded: Arc<AtomicBool>,
    cancel: CancellationToken,
) -> Result<()> {
    let mut raw = enc_leftover;
    let mut expected_seq: i32 = 0;

    loop {
        let mut tmp = [0u8; 16_384];
        let n = tokio::select! {
            res = rd.read(&mut tmp) => res.map_err(ProxyError::Io)?,
            _ = cancel.cancelled() => return Ok(()),
        };
        if n == 0 {
            return Ok(());
        }
@@ -70,6 +83,14 @@ pub(crate) async fn reader_loop(
            continue;
        }

        let seq_no = i32::from_le_bytes(frame[4..8].try_into().unwrap());
        if seq_no != expected_seq {
            warn!(seq_no, expected = expected_seq, "ME RPC seq mismatch");
            expected_seq = seq_no.wrapping_add(1);
        } else {
            expected_seq = expected_seq.wrapping_add(1);
        }

        let payload = &frame[8..pe];
        if payload.len() < 4 {
            continue;
@@ -115,10 +136,30 @@ pub(crate) async fn reader_loop(
            let mut pong = Vec::with_capacity(12);
            pong.extend_from_slice(&RPC_PONG_U32.to_le_bytes());
            pong.extend_from_slice(&ping_id.to_le_bytes());
            if let Err(e) = writer.lock().await.send_and_flush(&pong).await {
                warn!(error = %e, "PONG send failed");
                break;
            }
        } else if pt == RPC_PONG_U32 && body.len() >= 8 {
            let ping_id = i64::from_le_bytes(body[0..8].try_into().unwrap());
            if let Some((sent, wid)) = {
                let mut guard = ping_tracker.lock().await;
                guard.remove(&ping_id)
            } {
                let rtt = sent.elapsed().as_secs_f64() * 1000.0;
                let mut stats = rtt_stats.lock().await;
                let entry = stats.entry(wid).or_insert((rtt, rtt));
                entry.1 = entry.1 * 0.8 + rtt * 0.2;
                if rtt < entry.0 {
                    entry.0 = rtt;
                } else {
                    // allow slow baseline drift upward to avoid stale minimum
                    entry.0 = entry.0 * 0.99 + rtt * 0.01;
                }
                let degraded_now = entry.1 > entry.0 * 2.0;
                degraded.store(degraded_now, Ordering::Relaxed);
                trace!(writer_id = wid, rtt_ms = rtt, ema_ms = entry.1, base_ms = entry.0, degraded = degraded_now, "ME RTT sample");
            }
        } else {
            debug!(
                rpc_type = format_args!("0x{pt:08x}"),
@@ -135,7 +176,7 @@ async fn send_close_conn(writer: &Arc<Mutex<RpcWriter>>, conn_id: u64) {
    p.extend_from_slice(&RPC_CLOSE_CONN_U32.to_le_bytes());
    p.extend_from_slice(&conn_id.to_le_bytes());

    if let Err(e) = writer.lock().await.send_and_flush(&p).await {
        debug!(conn_id, error = %e, "Failed to send RPC_CLOSE_CONN");
    }
}
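// Sketch of the RTT bookkeeping above: per writer we keep (baseline_ms, ema_ms); the EMA uses
// alpha = 0.2, the baseline tracks the minimum but drifts upward at 1% per sample, and the
// writer is flagged degraded when the EMA exceeds twice the baseline. The function below only
// restates that update rule for illustration.
fn update_rtt(entry: &mut (f64, f64), rtt_ms: f64) -> bool {
    entry.1 = entry.1 * 0.8 + rtt_ms * 0.2; // EMA
    if rtt_ms < entry.0 {
        entry.0 = rtt_ms; // new best baseline
    } else {
        entry.0 = entry.0 * 0.99 + rtt_ms * 0.01; // slow upward drift
    }
    entry.1 > entry.0 * 2.0 // degraded?
}
// e.g. with a 20 ms baseline, a burst of ~50 ms samples pushes the EMA past 40 ms and trips the flag.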
@@ -1,42 +1,156 @@
use std::collections::{HashMap, HashSet};
use std::net::SocketAddr;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

use tokio::sync::{mpsc, Mutex, RwLock};

use super::codec::RpcWriter;
use super::MeResponse;

#[derive(Clone)]
pub struct ConnMeta {
    pub target_dc: i16,
    pub client_addr: SocketAddr,
    pub our_addr: SocketAddr,
    pub proto_flags: u32,
}

#[derive(Clone)]
pub struct BoundConn {
    pub conn_id: u64,
    pub meta: ConnMeta,
}

#[derive(Clone)]
pub struct ConnWriter {
    pub writer_id: u64,
    pub writer: Arc<Mutex<RpcWriter>>,
}

struct RegistryInner {
    map: HashMap<u64, mpsc::Sender<MeResponse>>,
    writers: HashMap<u64, Arc<Mutex<RpcWriter>>>,
    writer_for_conn: HashMap<u64, u64>,
    conns_for_writer: HashMap<u64, HashSet<u64>>,
    meta: HashMap<u64, ConnMeta>,
}

impl RegistryInner {
    fn new() -> Self {
        Self {
            map: HashMap::new(),
            writers: HashMap::new(),
            writer_for_conn: HashMap::new(),
            conns_for_writer: HashMap::new(),
            meta: HashMap::new(),
        }
    }
}

pub struct ConnRegistry {
    inner: RwLock<RegistryInner>,
    next_id: AtomicU64,
}

impl ConnRegistry {
    pub fn new() -> Self {
        // Avoid fully predictable conn_id sequence from 1.
        let start = rand::random::<u64>() | 1;
        Self {
            inner: RwLock::new(RegistryInner::new()),
            next_id: AtomicU64::new(start),
        }
    }

    pub async fn register(&self) -> (u64, mpsc::Receiver<MeResponse>) {
        let id = self.next_id.fetch_add(1, Ordering::Relaxed);
        let (tx, rx) = mpsc::channel(1024);
        self.inner.write().await.map.insert(id, tx);
        (id, rx)
    }

    /// Unregister connection, returning associated writer_id if any.
    pub async fn unregister(&self, id: u64) -> Option<u64> {
        let mut inner = self.inner.write().await;
        inner.map.remove(&id);
        inner.meta.remove(&id);
        if let Some(writer_id) = inner.writer_for_conn.remove(&id) {
            if let Some(set) = inner.conns_for_writer.get_mut(&writer_id) {
                set.remove(&id);
            }
            return Some(writer_id);
        }
        None
    }

    pub async fn route(&self, id: u64, resp: MeResponse) -> bool {
        let inner = self.inner.read().await;
        if let Some(tx) = inner.map.get(&id) {
            tx.try_send(resp).is_ok()
        } else {
            false
        }
    }

    pub async fn bind_writer(
        &self,
        conn_id: u64,
        writer_id: u64,
        writer: Arc<Mutex<RpcWriter>>,
        meta: ConnMeta,
    ) {
        let mut inner = self.inner.write().await;
        inner.meta.entry(conn_id).or_insert(meta);
        inner.writer_for_conn.insert(conn_id, writer_id);
        inner.writers.entry(writer_id).or_insert_with(|| writer.clone());
        inner
            .conns_for_writer
            .entry(writer_id)
            .or_insert_with(HashSet::new)
            .insert(conn_id);
    }

    pub async fn get_writer(&self, conn_id: u64) -> Option<ConnWriter> {
        let inner = self.inner.read().await;
        let writer_id = inner.writer_for_conn.get(&conn_id).cloned()?;
        let writer = inner.writers.get(&writer_id).cloned()?;
        Some(ConnWriter { writer_id, writer })
    }

    pub async fn writer_lost(&self, writer_id: u64) -> Vec<BoundConn> {
        let mut inner = self.inner.write().await;
        inner.writers.remove(&writer_id);
        let conns = inner
            .conns_for_writer
            .remove(&writer_id)
            .unwrap_or_default()
            .into_iter()
            .collect::<Vec<_>>();

        let mut out = Vec::new();
        for conn_id in conns {
            inner.writer_for_conn.remove(&conn_id);
            if let Some(m) = inner.meta.get(&conn_id) {
                out.push(BoundConn {
                    conn_id,
                    meta: m.clone(),
                });
            }
        }
        out
    }

    pub async fn get_meta(&self, conn_id: u64) -> Option<ConnMeta> {
        let inner = self.inner.read().await;
        inner.meta.get(&conn_id).cloned()
    }

    pub async fn is_writer_empty(&self, writer_id: u64) -> bool {
        let inner = self.inner.read().await;
        inner
            .conns_for_writer
            .get(&writer_id)
            .map(|s| s.is_empty())
            .unwrap_or(true)
    }
}
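// Usage sketch of the registry lifecycle above (call order inferred from the pool code in this
// diff, not an additional API): register -> bind_writer on first successful send ->
// writer_lost / unregister on teardown.
async fn example_flow(reg: &ConnRegistry, writer_id: u64, writer: Arc<Mutex<RpcWriter>>, meta: ConnMeta) {
    let (conn_id, mut rx) = reg.register().await;
    reg.bind_writer(conn_id, writer_id, writer, meta).await;
    // responses for conn_id arrive on rx via route()
    let _ = rx.recv().await;
    for bound in reg.writer_lost(writer_id).await {
        // each still-bound conn should be told its ME link is gone
        let _ = reg.unregister(bound.conn_id).await;
    }
}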
src/transport/middle_proxy/rotation.rs (new file, 42 lines)
@@ -0,0 +1,42 @@
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::time::Duration;

use tracing::{info, warn};

use crate::crypto::SecureRandom;

use super::MePool;

/// Periodically refresh ME connections to avoid long-lived degradation.
pub async fn me_rotation_task(pool: Arc<MePool>, rng: Arc<SecureRandom>, interval: Duration) {
    let interval = interval.max(Duration::from_secs(600));
    loop {
        tokio::time::sleep(interval).await;

        let candidate = {
            let ws = pool.writers.read().await;
            if ws.is_empty() {
                None
            } else {
                let idx = (pool.rr.load(std::sync::atomic::Ordering::Relaxed) as usize) % ws.len();
                ws.get(idx).cloned()
            }
        };

        let Some(w) = candidate else {
            continue;
        };

        info!(addr = %w.addr, writer_id = w.id, "Rotating ME connection");
        match pool.connect_one(w.addr, rng.as_ref()).await {
            Ok(()) => {
                // Mark old writer for graceful drain; removal happens when sessions finish.
                pool.mark_writer_draining(w.id).await;
            }
            Err(e) => {
                warn!(addr = %w.addr, writer_id = w.id, error = %e, "ME rotation connect failed");
            }
        }
    }
}
@@ -1,6 +1,8 @@
use std::time::Duration;

use tracing::{debug, info, warn};
use std::time::SystemTime;
use httpdate;

use crate::error::{ProxyError, Result};

@@ -51,7 +53,7 @@ pub async fn fetch_proxy_secret(cache_path: Option<&str>) -> Result<Vec<u8>> {
    }
}

pub async fn download_proxy_secret() -> Result<Vec<u8>> {
    let resp = reqwest::get("https://core.telegram.org/getProxySecret")
        .await
        .map_err(|e| ProxyError::Proxy(format!("Failed to download proxy-secret: {e}")))?;
@@ -63,6 +65,23 @@ async fn download_proxy_secret() -> Result<Vec<u8>> {
        )));
    }

    if let Some(date) = resp.headers().get(reqwest::header::DATE) {
        if let Ok(date_str) = date.to_str() {
            if let Ok(server_time) = httpdate::parse_http_date(date_str) {
                if let Ok(skew) = SystemTime::now().duration_since(server_time).or_else(|e| {
                    server_time.duration_since(SystemTime::now()).map_err(|_| e)
                }) {
                    let skew_secs = skew.as_secs();
                    if skew_secs > 60 {
                        warn!(skew_secs, "Time skew >60s detected from proxy-secret Date header");
                    } else if skew_secs > 30 {
                        warn!(skew_secs, "Time skew >30s detected from proxy-secret Date header");
                    }
                }
            }
        }
    }

    let data = resp
        .bytes()
        .await
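// Sketch of the skew computation above: SystemTime::duration_since fails when its argument is
// in the future, so the or_else swap yields |now - server_time| regardless of sign before the
// 30 s / 60 s warning thresholds are applied.
use std::time::{Duration, SystemTime};

fn absolute_skew(server_time: SystemTime) -> Duration {
    let now = SystemTime::now();
    now.duration_since(server_time)
        .or_else(|_| server_time.duration_since(now))
        .unwrap_or_default()
}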
@@ -1,20 +1,22 @@
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::time::Duration;

use tracing::{debug, warn};

use crate::error::{ProxyError, Result};
use crate::network::IpFamily;
use crate::protocol::constants::RPC_CLOSE_EXT_U32;

use super::MePool;
use super::wire::build_proxy_req_payload;
use rand::seq::SliceRandom;
use super::registry::ConnMeta;

impl MePool {
    pub async fn send_proxy_req(
        self: &Arc<Self>,
        conn_id: u64,
        target_dc: i16,
        client_addr: SocketAddr,
@@ -30,73 +32,143 @@ impl MePool {
            self.proxy_tag.as_deref(),
            proto_flags,
        );
        let meta = ConnMeta {
            target_dc,
            client_addr,
            our_addr,
            proto_flags,
        };
        let mut emergency_attempts = 0;

        loop {
            if let Some(current) = self.registry.get_writer(conn_id).await {
                let send_res = {
                    if let Ok(mut guard) = current.writer.try_lock() {
                        let r = guard.send(&payload).await;
                        drop(guard);
                        r
                    } else {
                        current.writer.lock().await.send(&payload).await
                    }
                };
                match send_res {
                    Ok(()) => return Ok(()),
                    Err(e) => {
                        warn!(error = %e, writer_id = current.writer_id, "ME write failed");
                        self.remove_writer_and_close_clients(current.writer_id).await;
                        continue;
                    }
                }
            }

            let mut writers_snapshot = {
                let ws = self.writers.read().await;
                if ws.is_empty() {
                    return Err(ProxyError::Proxy("All ME connections dead".into()));
                }
                ws.clone()
            };

            let mut candidate_indices = self.candidate_indices_for_dc(&writers_snapshot, target_dc).await;
            if candidate_indices.is_empty() {
                // Emergency connect-on-demand
                if emergency_attempts >= 3 {
                    return Err(ProxyError::Proxy("No ME writers available for target DC".into()));
                }
                emergency_attempts += 1;
                for family in self.family_order() {
                    let map_guard = match family {
                        IpFamily::V4 => self.proxy_map_v4.read().await,
                        IpFamily::V6 => self.proxy_map_v6.read().await,
                    };
                    if let Some(addrs) = map_guard.get(&(target_dc as i32)) {
                        let mut shuffled = addrs.clone();
                        shuffled.shuffle(&mut rand::rng());
                        drop(map_guard);
                        for (ip, port) in shuffled {
                            let addr = SocketAddr::new(ip, port);
                            if self.connect_one(addr, self.rng.as_ref()).await.is_ok() {
                                break;
                            }
                        }
                        tokio::time::sleep(Duration::from_millis(100 * emergency_attempts)).await;
                        let ws2 = self.writers.read().await;
                        writers_snapshot = ws2.clone();
                        drop(ws2);
                        candidate_indices = self.candidate_indices_for_dc(&writers_snapshot, target_dc).await;
                        break;
                    }
                    drop(map_guard);
                }
                if candidate_indices.is_empty() {
                    return Err(ProxyError::Proxy("No ME writers available for target DC".into()));
                }
            }

            candidate_indices.sort_by_key(|idx| {
                let w = &writers_snapshot[*idx];
                let degraded = w.degraded.load(Ordering::Relaxed);
                let draining = w.draining.load(Ordering::Relaxed);
                (draining as usize, degraded as usize)
            });

            let start = self.rr.fetch_add(1, Ordering::Relaxed) as usize % candidate_indices.len();

            for offset in 0..candidate_indices.len() {
                let idx = candidate_indices[(start + offset) % candidate_indices.len()];
                let w = &writers_snapshot[idx];
                if w.draining.load(Ordering::Relaxed) {
                    continue;
                }
                if let Ok(mut guard) = w.writer.try_lock() {
                    let send_res = guard.send(&payload).await;
                    drop(guard);
                    match send_res {
                        Ok(()) => {
                            self.registry
                                .bind_writer(conn_id, w.id, w.writer.clone(), meta.clone())
                                .await;
                            return Ok(());
                        }
                        Err(e) => {
                            warn!(error = %e, writer_id = w.id, "ME write failed");
                            self.remove_writer_and_close_clients(w.id).await;
                            continue;
                        }
                    }
                }
            }

            // All writers are currently busy, wait for the selected one.
            let w = writers_snapshot[candidate_indices[start]].clone();
            if w.draining.load(Ordering::Relaxed) {
                continue;
            }
            match w.writer.lock().await.send(&payload).await {
                Ok(()) => {
                    self.registry
                        .bind_writer(conn_id, w.id, w.writer.clone(), meta.clone())
                        .await;
                    return Ok(());
                }
                Err(e) => {
                    warn!(error = %e, writer_id = w.id, "ME write failed (blocking)");
                    self.remove_writer_and_close_clients(w.id).await;
                }
            }
        }
    }

    pub async fn send_close(self: &Arc<Self>, conn_id: u64) -> Result<()> {
        if let Some(w) = self.registry.get_writer(conn_id).await {
            let mut p = Vec::with_capacity(12);
            p.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
            p.extend_from_slice(&conn_id.to_le_bytes());
            if let Err(e) = w.writer.lock().await.send_and_flush(&p).await {
                debug!(error = %e, "ME close write failed");
                self.remove_writer_and_close_clients(w.writer_id).await;
            }
        } else {
            debug!(conn_id, "ME close skipped (writer missing)");
        }

        self.registry.unregister(conn_id).await;
@@ -106,41 +178,73 @@ impl MePool {
    pub fn connection_count(&self) -> usize {
        self.writers.try_read().map(|w| w.len()).unwrap_or(0)
    }

    pub(super) async fn candidate_indices_for_dc(
        &self,
        writers: &[super::pool::MeWriter],
        target_dc: i16,
    ) -> Vec<usize> {
        let key = target_dc as i32;
        let mut preferred = Vec::<SocketAddr>::new();

        for family in self.family_order() {
            let map_guard = match family {
                IpFamily::V4 => self.proxy_map_v4.read().await,
                IpFamily::V6 => self.proxy_map_v6.read().await,
            };

            if let Some(v) = map_guard.get(&key) {
                preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
            }
            if preferred.is_empty() {
                let abs = key.abs();
                if let Some(v) = map_guard.get(&abs) {
                    preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
                }
            }
            if preferred.is_empty() {
                let abs = key.abs();
                if let Some(v) = map_guard.get(&-abs) {
                    preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
                }
            }
            if preferred.is_empty() {
                let def = self.default_dc.load(Ordering::Relaxed);
                if def != 0 {
                    if let Some(v) = map_guard.get(&def) {
                        preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
                    }
                }
            }

            drop(map_guard);

            if !preferred.is_empty() && !self.decision.effective_multipath {
                break;
            }
        }

        if preferred.is_empty() {
            return (0..writers.len())
                .filter(|i| !writers[*i].draining.load(Ordering::Relaxed))
                .collect();
        }

        let mut out = Vec::new();
        for (idx, w) in writers.iter().enumerate() {
            if w.draining.load(Ordering::Relaxed) {
                continue;
            }
            if preferred.iter().any(|p| *p == w.addr) {
                out.push(idx);
            }
        }
        if out.is_empty() {
            return (0..writers.len())
                .filter(|i| !writers[*i].draining.load(Ordering::Relaxed))
                .collect();
        }
        out
    }
}
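// Sketch of the selection order used above: candidates are sorted so healthy writers come
// first, then degraded, then draining (sort_by_key on the (draining, degraded) tuple), and the
// round-robin counter only chooses the starting offset within that ordering before each
// candidate is tried once with try_lock.
fn rr_order(len: usize, start: usize) -> impl Iterator<Item = usize> {
    (0..len).map(move |offset| (start + offset) % len)
}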
@@ -28,9 +28,7 @@ fn ipv4_to_mapped_v6_c_compat(ip: Ipv4Addr) -> [u8; 16] {
    buf[8..12].copy_from_slice(&(-0x10000i32).to_le_bytes());

    // Matches tl_store_int(htonl(remote_ip_host_order)).
    buf[12..16].copy_from_slice(&ip.octets());

    buf
}
@@ -60,7 +58,7 @@ pub(crate) fn build_proxy_req_payload(
    append_mapped_addr_and_port(&mut b, client_addr);
    append_mapped_addr_and_port(&mut b, our_addr);

    if proto_flags & RPC_FLAG_HAS_AD_TAG != 0 {
        let extra_start = b.len();
        b.extend_from_slice(&0u32.to_le_bytes());

@@ -104,3 +102,17 @@ pub fn proto_flags_for_tag(tag: crate::protocol::constants::ProtoTag, has_proxy_
        ProtoTag::Secure => flags | RPC_FLAG_PAD | RPC_FLAG_INTERMEDIATE,
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_ipv4_mapped_encoding() {
        let ip = Ipv4Addr::new(149, 154, 175, 50);
        let buf = ipv4_to_mapped_v6_c_compat(ip);
        assert_eq!(&buf[0..10], &[0u8; 10]);
        assert_eq!(&buf[10..12], &[0xff, 0xff]);
        assert_eq!(&buf[12..16], &[149, 154, 175, 50]);
    }
}
@@ -285,12 +285,17 @@ where
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::ErrorKind;
    use tokio::net::TcpListener;

    #[tokio::test]
    async fn test_pool_basic() {
        // Start a test server
        let listener = match TcpListener::bind("127.0.0.1:0").await {
            Ok(l) => l,
            Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
            Err(e) => panic!("bind failed: {e}"),
        };
        let addr = listener.local_addr().unwrap();

        // Accept connections in background
@@ -303,7 +308,11 @@ mod tests {
        let pool = ConnectionPool::new();

        // Get a connection
        let conn1 = match pool.get(addr).await {
            Ok(c) => c,
            Err(ProxyError::Io(e)) if e.kind() == ErrorKind::PermissionDenied => return,
            Err(e) => panic!("connect failed: {e}"),
        };

        // Return it to pool
        pool.put(addr, conn1).await;
@@ -335,4 +344,4 @@ mod tests {
        assert_eq!(stats.endpoints, 0);
        assert_eq!(stats.total_connections, 0);
    }
}
@@ -205,15 +205,29 @@ pub fn create_listener(addr: SocketAddr, options: &ListenOptions) -> Result<Sock
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::ErrorKind;
    use tokio::net::TcpListener;

    #[tokio::test]
    async fn test_configure_socket() {
        let listener = match TcpListener::bind("127.0.0.1:0").await {
            Ok(l) => l,
            Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
            Err(e) => panic!("bind failed: {e}"),
        };
        let addr = listener.local_addr().unwrap();

        let stream = match TcpStream::connect(addr).await {
            Ok(s) => s,
            Err(e) if e.kind() == ErrorKind::PermissionDenied => return,
            Err(e) => panic!("connect failed: {e}"),
        };
        if let Err(e) = configure_tcp_socket(&stream, true, Duration::from_secs(30)) {
            if e.kind() == ErrorKind::PermissionDenied {
                return;
            }
            panic!("configure_tcp_socket failed: {e}");
        }
    }

    #[test]
@@ -234,4 +248,4 @@ mod tests {
        assert!(opts.reuse_port);
        assert_eq!(opts.backlog, 1024);
    }
}
|||||||
@@ -2,6 +2,7 @@
|
|||||||
//!
|
//!
|
||||||
//! IPv6/IPv4 connectivity checks with configurable preference.
|
//! IPv6/IPv4 connectivity checks with configurable preference.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
use std::net::{SocketAddr, IpAddr};
|
use std::net::{SocketAddr, IpAddr};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@@ -350,7 +351,13 @@ impl UpstreamManager {
|
|||||||
|
|
||||||
/// Ping all Telegram DCs through all upstreams.
|
/// Ping all Telegram DCs through all upstreams.
|
||||||
/// Tests BOTH IPv6 and IPv4, returns separate results for each.
|
/// Tests BOTH IPv6 and IPv4, returns separate results for each.
|
||||||
pub async fn ping_all_dcs(&self, prefer_ipv6: bool) -> Vec<StartupPingResult> {
|
pub async fn ping_all_dcs(
|
||||||
|
&self,
|
||||||
|
prefer_ipv6: bool,
|
||||||
|
dc_overrides: &HashMap<String, Vec<String>>,
|
||||||
|
ipv4_enabled: bool,
|
||||||
|
ipv6_enabled: bool,
|
||||||
|
) -> Vec<StartupPingResult> {
|
||||||
let upstreams: Vec<(usize, UpstreamConfig)> = {
|
let upstreams: Vec<(usize, UpstreamConfig)> = {
|
||||||
let guard = self.upstreams.read().await;
|
let guard = self.upstreams.read().await;
|
||||||
guard.iter().enumerate()
|
guard.iter().enumerate()
|
||||||
@@ -369,85 +376,161 @@ impl UpstreamManager {
|
|||||||
UpstreamType::Socks5 { address, .. } => format!("socks5://{}", address),
|
UpstreamType::Socks5 { address, .. } => format!("socks5://{}", address),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut v6_results = Vec::new();
|
let mut v6_results = Vec::with_capacity(NUM_DCS);
|
||||||
let mut v4_results = Vec::new();
|
if ipv6_enabled {
|
||||||
|
for dc_zero_idx in 0..NUM_DCS {
|
||||||
|
let dc_v6 = TG_DATACENTERS_V6[dc_zero_idx];
|
||||||
|
let addr_v6 = SocketAddr::new(dc_v6, TG_DATACENTER_PORT);
|
||||||
|
|
||||||
// === Ping IPv6 first ===
|
let result = tokio::time::timeout(
|
||||||
for dc_zero_idx in 0..NUM_DCS {
|
Duration::from_secs(DC_PING_TIMEOUT_SECS),
|
||||||
let dc_v6 = TG_DATACENTERS_V6[dc_zero_idx];
|
self.ping_single_dc(&upstream_config, addr_v6)
|
||||||
let addr_v6 = SocketAddr::new(dc_v6, TG_DATACENTER_PORT);
|
).await;
|
||||||
|
|
||||||
let result = tokio::time::timeout(
|
let ping_result = match result {
|
||||||
Duration::from_secs(DC_PING_TIMEOUT_SECS),
|
Ok(Ok(rtt_ms)) => {
|
||||||
self.ping_single_dc(&upstream_config, addr_v6)
|
let mut guard = self.upstreams.write().await;
|
||||||
).await;
|
if let Some(u) = guard.get_mut(*upstream_idx) {
|
||||||
|
u.dc_latency[dc_zero_idx].update(rtt_ms);
|
||||||
let ping_result = match result {
|
}
|
||||||
Ok(Ok(rtt_ms)) => {
|
DcPingResult {
|
||||||
let mut guard = self.upstreams.write().await;
|
dc_idx: dc_zero_idx + 1,
|
||||||
if let Some(u) = guard.get_mut(*upstream_idx) {
|
dc_addr: addr_v6,
|
||||||
u.dc_latency[dc_zero_idx].update(rtt_ms);
|
rtt_ms: Some(rtt_ms),
|
||||||
|
error: None,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
DcPingResult {
|
Ok(Err(e)) => DcPingResult {
|
||||||
dc_idx: dc_zero_idx + 1,
|
dc_idx: dc_zero_idx + 1,
|
||||||
dc_addr: addr_v6,
|
dc_addr: addr_v6,
|
||||||
rtt_ms: Some(rtt_ms),
|
rtt_ms: None,
|
||||||
error: None,
|
error: Some(e.to_string()),
|
||||||
}
|
},
|
||||||
}
|
Err(_) => DcPingResult {
|
||||||
Ok(Err(e)) => DcPingResult {
|
dc_idx: dc_zero_idx + 1,
|
||||||
|
dc_addr: addr_v6,
|
||||||
|
rtt_ms: None,
|
||||||
|
error: Some("timeout".to_string()),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
v6_results.push(ping_result);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for dc_zero_idx in 0..NUM_DCS {
|
||||||
|
let dc_v6 = TG_DATACENTERS_V6[dc_zero_idx];
|
||||||
|
v6_results.push(DcPingResult {
|
||||||
dc_idx: dc_zero_idx + 1,
|
dc_idx: dc_zero_idx + 1,
|
||||||
dc_addr: addr_v6,
|
dc_addr: SocketAddr::new(dc_v6, TG_DATACENTER_PORT),
|
||||||
rtt_ms: None,
|
rtt_ms: None,
|
||||||
error: Some(e.to_string()),
|
error: Some("ipv6 disabled".to_string()),
|
||||||
},
|
});
|
||||||
Err(_) => DcPingResult {
|
}
|
||||||
dc_idx: dc_zero_idx + 1,
|
|
||||||
dc_addr: addr_v6,
|
|
||||||
rtt_ms: None,
|
|
||||||
error: Some("timeout".to_string()),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
v6_results.push(ping_result);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
        // === Then ping IPv4 ===
        let mut v4_results = Vec::with_capacity(NUM_DCS);
        if ipv4_enabled {
            for dc_zero_idx in 0..NUM_DCS {
                let dc_v4 = TG_DATACENTERS_V4[dc_zero_idx];
                let addr_v4 = SocketAddr::new(dc_v4, TG_DATACENTER_PORT);

                let result = tokio::time::timeout(
                    Duration::from_secs(DC_PING_TIMEOUT_SECS),
                    self.ping_single_dc(&upstream_config, addr_v4)
                ).await;

                let ping_result = match result {
                    Ok(Ok(rtt_ms)) => {
                        let mut guard = self.upstreams.write().await;
                        if let Some(u) = guard.get_mut(*upstream_idx) {
                            u.dc_latency[dc_zero_idx].update(rtt_ms);
                        }
                        DcPingResult {
                            dc_idx: dc_zero_idx + 1,
                            dc_addr: addr_v4,
                            rtt_ms: Some(rtt_ms),
                            error: None,
                        }
                    }
                    Ok(Err(e)) => DcPingResult {
                        dc_idx: dc_zero_idx + 1,
                        dc_addr: addr_v4,
                        rtt_ms: None,
                        error: Some(e.to_string()),
                    },
                    Err(_) => DcPingResult {
                        dc_idx: dc_zero_idx + 1,
                        dc_addr: addr_v4,
                        rtt_ms: None,
                        error: Some("timeout".to_string()),
                    },
                };
                v4_results.push(ping_result);
            }
        } else {
            for dc_zero_idx in 0..NUM_DCS {
                let dc_v4 = TG_DATACENTERS_V4[dc_zero_idx];
                v4_results.push(DcPingResult {
                    dc_idx: dc_zero_idx + 1,
                    dc_addr: SocketAddr::new(dc_v4, TG_DATACENTER_PORT),
                    rtt_ms: None,
                    error: Some("ipv4 disabled".to_string()),
                });
            }
        }

        // === Ping DC overrides (v4/v6) ===
        for (dc_key, addrs) in dc_overrides {
            let dc_num: i16 = match dc_key.parse::<i16>() {
                Ok(v) if v > 0 => v,
                Err(_) => {
                    warn!(dc = %dc_key, "Invalid dc_overrides key, skipping");
                    continue;
                },
                _ => continue,
            };
            let dc_idx = dc_num as usize;
            for addr_str in addrs {
                match addr_str.parse::<SocketAddr>() {
                    Ok(addr) => {
                        let is_v6 = addr.is_ipv6();
                        if (is_v6 && !ipv6_enabled) || (!is_v6 && !ipv4_enabled) {
                            continue;
                        }
                        let result = tokio::time::timeout(
                            Duration::from_secs(DC_PING_TIMEOUT_SECS),
                            self.ping_single_dc(&upstream_config, addr)
                        ).await;

                        let ping_result = match result {
                            Ok(Ok(rtt_ms)) => DcPingResult {
                                dc_idx,
                                dc_addr: addr,
                                rtt_ms: Some(rtt_ms),
                                error: None,
                            },
                            Ok(Err(e)) => DcPingResult {
                                dc_idx,
                                dc_addr: addr,
                                rtt_ms: None,
                                error: Some(e.to_string()),
                            },
                            Err(_) => DcPingResult {
                                dc_idx,
                                dc_addr: addr,
                                rtt_ms: None,
                                error: Some("timeout".to_string()),
                            },
                        };

                        if is_v6 {
                            v6_results.push(ping_result);
                        } else {
                            v4_results.push(ping_result);
                        }
                    }
                    Err(_) => warn!(dc = %dc_idx, addr = %addr_str, "Invalid dc_overrides address, skipping"),
                }
            }
        }

        // Check if both IP versions have at least one working DC
@@ -494,7 +577,7 @@ impl UpstreamManager

    /// Background health check: rotates through DCs, 30s interval.
    /// Uses preferred IP version based on config.
    pub async fn run_health_checks(&self, prefer_ipv6: bool, ipv4_enabled: bool, ipv6_enabled: bool) {
        let mut dc_rotation = 0usize;

        loop {
@@ -503,16 +586,24 @@ impl UpstreamManager
            let dc_zero_idx = dc_rotation % NUM_DCS;
            dc_rotation += 1;

            let primary_v6 = SocketAddr::new(TG_DATACENTERS_V6[dc_zero_idx], TG_DATACENTER_PORT);
            let primary_v4 = SocketAddr::new(TG_DATACENTERS_V4[dc_zero_idx], TG_DATACENTER_PORT);

            let dc_addr = if prefer_ipv6 && ipv6_enabled {
                primary_v6
            } else if ipv4_enabled {
                primary_v4
            } else if ipv6_enabled {
                primary_v6
            } else {
                continue;
            };

            let fallback_addr = if dc_addr.is_ipv6() && ipv4_enabled {
                Some(primary_v4)
            } else if dc_addr.is_ipv4() && ipv6_enabled {
                Some(primary_v6)
            } else {
                None
            };

            let count = self.upstreams.read().await.len();
@@ -551,48 +642,60 @@ impl UpstreamManager
                // Try fallback
                debug!(dc = dc_zero_idx + 1, "Health check failed, trying fallback");

                if let Some(fallback_addr) = fallback_addr {
                    let start2 = Instant::now();
                    let result2 = tokio::time::timeout(
                        Duration::from_secs(10),
                        self.connect_via_upstream(&config, fallback_addr)
                    ).await;

                    let mut guard = self.upstreams.write().await;
                    let u = &mut guard[i];

                    match result2 {
                        Ok(Ok(_stream)) => {
                            let rtt_ms = start2.elapsed().as_secs_f64() * 1000.0;
                            u.dc_latency[dc_zero_idx].update(rtt_ms);

                            if !u.healthy {
                                info!(
                                    rtt = format!("{:.0} ms", rtt_ms),
                                    dc = dc_zero_idx + 1,
                                    "Upstream recovered (fallback)"
                                );
                            }
                            u.healthy = true;
                            u.fails = 0;
                        }
                        Ok(Err(e)) => {
                            u.fails += 1;
                            debug!(dc = dc_zero_idx + 1, fails = u.fails,
                                "Health check failed (both): {}", e);
                            if u.fails > 3 {
                                u.healthy = false;
                                warn!("Upstream unhealthy (fails)");
                            }
                        }
                        Err(_) => {
                            u.fails += 1;
                            debug!(dc = dc_zero_idx + 1, fails = u.fails,
                                "Health check timeout (both)");
                            if u.fails > 3 {
                                u.healthy = false;
                                warn!("Upstream unhealthy (timeout)");
                            }
                        }
                    }
                    u.last_check = std::time::Instant::now();
                    continue;
                }

                let mut guard = self.upstreams.write().await;
                let u = &mut guard[i];
                u.fails += 1;
                if u.fails > 3 {
                    u.healthy = false;
                    warn!("Upstream unhealthy (no fallback family)");
                }
                u.last_check = std::time::Instant::now();
            }
@@ -624,4 +727,4 @@ impl UpstreamManager

        Some(SocketAddr::new(ip, TG_DATACENTER_PORT))
    }
}
204  tools/dc.py  Normal file
@@ -0,0 +1,204 @@
"""Telegram datacenter server checker."""

from __future__ import annotations

import asyncio
from dataclasses import dataclass, field
from itertools import groupby
from operator import attrgetter
from pathlib import Path
from typing import TYPE_CHECKING

from telethon import TelegramClient
from telethon.tl.functions.help import GetConfigRequest

if TYPE_CHECKING:
    from telethon.tl.types import DcOption

API_ID: int = 123456
API_HASH: str = ""
SESSION_NAME: str = "session"
OUTPUT_FILE: Path = Path("telegram_servers.txt")

_CONSOLE_FLAG_MAP: dict[str, str] = {
    "IPv6": "IPv6",
    "MEDIA-ONLY": "🎬 MEDIA-ONLY",
    "CDN": "📦 CDN",
    "TCPO": "🔒 TCPO",
    "STATIC": "📌 STATIC",
}


@dataclass(frozen=True, slots=True)
class DCServer:
    """Typed representation of a Telegram DC server.

    Attributes:
        dc_id: Datacenter identifier.
        ip: Server IP address.
        port: Server port.
        flags: Active flag labels (plain, without emoji).
    """

    dc_id: int
    ip: str
    port: int
    flags: frozenset[str] = field(default_factory=frozenset)

    @classmethod
    def from_option(cls, dc: DcOption) -> DCServer:
        """Create from a Telethon DcOption.

        Args:
            dc: Raw DcOption object.

        Returns:
            Parsed DCServer instance.
        """
        checks: dict[str, bool] = {
            "IPv6": dc.ipv6,
            "MEDIA-ONLY": dc.media_only,
            "CDN": dc.cdn,
            "TCPO": dc.tcpo_only,
            "STATIC": dc.static,
        }
        return cls(
            dc_id=dc.id,
            ip=dc.ip_address,
            port=dc.port,
            flags=frozenset(k for k, v in checks.items() if v),
        )

    def flags_display(self, *, emoji: bool = False) -> str:
        """Formatted flags string.

        Args:
            emoji: Whether to include emoji prefixes.

        Returns:
            Bracketed flags or '[STANDARD]'.
        """
        if not self.flags:
            return "[STANDARD]"
        labels = sorted(
            _CONSOLE_FLAG_MAP[f] if emoji else f for f in self.flags
        )
        return f"[{', '.join(labels)}]"


class TelegramDCChecker:
    """Fetches and displays Telegram DC configuration.

    Attributes:
        _client: Telethon client instance.
        _servers: Parsed server list.
    """

    def __init__(self) -> None:
        """Initialize the checker."""
        self._client = TelegramClient(SESSION_NAME, API_ID, API_HASH)
        self._servers: list[DCServer] = []

    async def run(self) -> None:
        """Connect, fetch config, display and save results."""
        print("🔄 Подключаемся к Telegram...") # noqa: T201
        try:
            await self._client.start()
            print("✅ Подключение установлено!\n") # noqa: T201

            print("📡 Запрашиваем конфигурацию серверов...") # noqa: T201
            config = await self._client(GetConfigRequest())
            self._servers = [DCServer.from_option(dc) for dc in config.dc_options]

            self._print(config)
            self._save(config)
        finally:
            await self._client.disconnect()
            print("\n👋 Отключились от Telegram") # noqa: T201

    def _grouped(self) -> dict[int, list[DCServer]]:
        """Group servers by DC ID.

        Returns:
            Ordered mapping of DC ID to servers.
        """
        ordered = sorted(self._servers, key=attrgetter("dc_id"))
        return {k: list(g) for k, g in groupby(ordered, key=attrgetter("dc_id"))}

    def _print(self, config: object) -> None:
        """Print results to stdout in original format.

        Args:
            config: Raw Telegram config.
        """
        sep = "=" * 80
        dash = "-" * 80
        total = len(self._servers)

        print(f"📊 Получено серверов: {total}\n") # noqa: T201
        print(sep) # noqa: T201

        for dc_id, servers in self._grouped().items():
            print(f"\n🌐 DATACENTER {dc_id} ({len(servers)} серверов)") # noqa: T201
            print(dash) # noqa: T201
            for s in servers:
                print(f" {s.ip:45}:{s.port:5} {s.flags_display(emoji=True)}") # noqa: T201

        ipv4 = total - self._flag_count("IPv6")
        print(f"\n{sep}") # noqa: T201
        print("📈 СТАТИСТИКА:") # noqa: T201
        print(sep) # noqa: T201
        print(f" Всего серверов: {total}") # noqa: T201
        print(f" IPv4 серверы: {ipv4}") # noqa: T201
        print(f" IPv6 серверы: {self._flag_count('IPv6')}") # noqa: T201
        print(f" Media-only: {self._flag_count('MEDIA-ONLY')}") # noqa: T201
        print(f" CDN серверы: {self._flag_count('CDN')}") # noqa: T201
        print(f" TCPO-only: {self._flag_count('TCPO')}") # noqa: T201
        print(f" Static: {self._flag_count('STATIC')}") # noqa: T201

        print(f"\n{sep}") # noqa: T201
        print("ℹ️ ДОПОЛНИТЕЛЬНАЯ ИНФОРМАЦИЯ:") # noqa: T201
        print(sep) # noqa: T201
        print(f" Дата конфигурации: {config.date}") # noqa: T201 # type: ignore[attr-defined]
        print(f" Expires: {config.expires}") # noqa: T201 # type: ignore[attr-defined]
        print(f" Test mode: {config.test_mode}") # noqa: T201 # type: ignore[attr-defined]
        print(f" This DC: {config.this_dc}") # noqa: T201 # type: ignore[attr-defined]

    def _flag_count(self, flag: str) -> int:
        """Count servers with a given flag.

        Args:
            flag: Flag name.

        Returns:
            Count of matching servers.
        """
        return sum(1 for s in self._servers if flag in s.flags)

    def _save(self, config: object) -> None:
        """Save results to file in original format.

        Args:
            config: Raw Telegram config.
        """
        parts: list[str] = []
        parts.append("TELEGRAM DATACENTER SERVERS\n")
        parts.append("=" * 80 + "\n\n")

        for dc_id, servers in self._grouped().items():
            parts.append(f"\nDATACENTER {dc_id} ({len(servers)} servers)\n")
            parts.append("-" * 80 + "\n")
            for s in servers:
                parts.append(f" {s.ip}:{s.port} {s.flags_display(emoji=False)}\n")

        parts.append(f"\n\nTotal servers: {len(self._servers)}\n")
        parts.append(f"Generated: {config.date}\n") # type: ignore[attr-defined]

        OUTPUT_FILE.write_text("".join(parts), encoding="utf-8")

        print(f"\n💾 Сохраняем результаты в файл {OUTPUT_FILE}...") # noqa: T201
        print(f"✅ Результаты сохранены в {OUTPUT_FILE}") # noqa: T201


if __name__ == "__main__":
    asyncio.run(TelegramDCChecker().run())
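The checker above is meant to be run directly (python tools/dc.py) after filling in API_ID and API_HASH with credentials from my.telegram.org and installing telethon; the first run prompts for a Telegram login and creates the session file next to the script. A minimal programmatic sketch of the same flow follows; the import path and working directory are assumptions, not part of the change itself.

# Hypothetical usage sketch for tools/dc.py.
# Assumes the current working directory is tools/, telethon is installed,
# and API_ID / API_HASH in dc.py have been replaced with real credentials.
import asyncio

from dc import TelegramDCChecker

# Prints the per-DC server table and writes telegram_servers.txt,
# exactly as the script's own __main__ block does.
asyncio.run(TelegramDCChecker().run())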
804  tools/grafana-dashboard.json  Normal file
@@ -0,0 +1,804 @@
{
  "apiVersion": "dashboard.grafana.app/v1beta1",
  "kind": "Dashboard",
  "metadata": {
    "annotations": {
      "grafana.app/folder": "afd9kjusw2jnkb",
      "grafana.app/saved-from-ui": "Grafana v12.4.0-21693836646 (f059795f04)"
    },
    "labels": {},
    "name": "pi9trh5",
    "namespace": "default"
  },
  "spec": {
    "annotations": {
      "list": [
        {
          "builtIn": 1,
          "datasource": { "type": "prometheus", "uid": "${datasource}" },
          "enable": true,
          "hide": true,
          "iconColor": "rgba(0, 211, 255, 1)",
          "name": "Annotations & Alerts",
          "type": "dashboard"
        }
      ]
    },
    "editable": true,
    "fiscalYearStartMonth": 0,
    "graphTooltip": 0,
    "links": [],
    "panels": [
      {
        "collapsed": false,
        "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 },
        "id": 5,
        "panels": [],
        "title": "Common",
        "type": "row"
      },
      {
        "datasource": { "type": "prometheus", "uid": "${datasource}" },
        "fieldConfig": {
          "defaults": {
            "color": { "mode": "thresholds" },
            "mappings": [],
            "thresholds": { "mode": "absolute", "steps": [ { "color": "red", "value": 0 }, { "color": "green", "value": 300 } ] },
            "unit": "s"
          },
          "overrides": []
        },
        "gridPos": { "h": 8, "w": 6, "x": 0, "y": 1 },
        "id": 1,
        "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": false, "textMode": "auto", "wideLayout": true },
        "pluginVersion": "12.4.0-21693836646",
        "targets": [
          { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "max(telemt_uptime_seconds) by (service)", "format": "time_series", "legendFormat": "__auto", "range": true, "refId": "A" }
        ],
        "title": "uptime",
        "type": "stat"
      },
      {
        "datasource": { "type": "prometheus", "uid": "${datasource}" },
        "fieldConfig": {
          "defaults": {
            "color": { "mode": "thresholds" },
            "mappings": [],
            "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": 0 }, { "color": "red", "value": 80 } ] },
            "unit": "none"
          },
          "overrides": []
        },
        "gridPos": { "h": 8, "w": 6, "x": 6, "y": 1 },
        "id": 2,
        "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": false, "textMode": "auto", "wideLayout": true },
        "pluginVersion": "12.4.0-21693836646",
        "targets": [
          { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "max(telemt_connections_total) by (service)", "format": "time_series", "legendFormat": "__auto", "range": true, "refId": "A" }
        ],
        "title": "connections_total",
        "type": "stat"
      },
      {
        "datasource": { "type": "prometheus", "uid": "${datasource}" },
        "fieldConfig": {
          "defaults": {
            "color": { "mode": "thresholds" },
            "mappings": [],
            "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": 0 }, { "color": "red", "value": 80 } ] },
            "unit": "none"
          },
          "overrides": []
        },
        "gridPos": { "h": 8, "w": 6, "x": 12, "y": 1 },
        "id": 3,
        "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": false, "textMode": "auto", "wideLayout": true },
        "pluginVersion": "12.4.0-21693836646",
        "targets": [
          { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "max(telemt_connections_bad_total) by (service)", "format": "time_series", "legendFormat": "__auto", "range": true, "refId": "A" }
        ],
        "title": "connections_bad",
        "type": "stat"
      },
      {
        "datasource": { "type": "prometheus", "uid": "${datasource}" },
        "fieldConfig": {
          "defaults": {
            "color": { "mode": "thresholds" },
            "mappings": [],
            "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": 0 }, { "color": "red", "value": 80 } ] },
            "unit": "none"
          },
          "overrides": []
        },
        "gridPos": { "h": 8, "w": 6, "x": 18, "y": 1 },
        "id": 4,
        "options": { "colorMode": "value", "graphMode": "area", "justifyMode": "auto", "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { "calcs": [ "lastNotNull" ], "fields": "", "values": false }, "showPercentChange": false, "textMode": "auto", "wideLayout": true },
        "pluginVersion": "12.4.0-21693836646",
        "targets": [
          { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "max(telemt_handshake_timeouts_total) by (service)", "format": "time_series", "legendFormat": "__auto", "range": true, "refId": "A" }
        ],
        "title": "handshake_timeouts",
        "type": "stat"
      },
      {
        "collapsed": false,
        "gridPos": { "h": 1, "w": 24, "x": 0, "y": 9 },
        "id": 6,
        "panels": [],
        "repeat": "user",
        "title": "$user",
        "type": "row"
      },
      {
        "datasource": { "type": "prometheus", "uid": "${datasource}" },
        "fieldConfig": {
          "defaults": {
            "color": { "mode": "palette-classic" },
            "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "showValues": false, "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } },
            "mappings": [],
            "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": 0 }, { "color": "red", "value": 80 } ] },
            "unit": "none"
          },
          "overrides": []
        },
        "gridPos": { "h": 8, "w": 12, "x": 0, "y": 10 },
        "id": 7,
        "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } },
        "pluginVersion": "12.4.0-21693836646",
        "targets": [
          { "editorMode": "code", "expr": "sum(telemt_user_connections_total{user=\"$user\"}) by (user)", "format": "time_series", "legendFormat": "{{ user }}", "range": true, "refId": "A" }
        ],
        "title": "user_connections",
        "type": "timeseries"
      },
      {
        "datasource": { "type": "prometheus", "uid": "${datasource}" },
        "fieldConfig": {
          "defaults": {
            "color": { "mode": "palette-classic" },
            "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "showValues": false, "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } },
            "mappings": [],
            "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": 0 }, { "color": "red", "value": 80 } ] },
            "unit": "none"
          },
          "overrides": []
        },
        "gridPos": { "h": 8, "w": 12, "x": 12, "y": 10 },
        "id": 8,
        "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } },
        "pluginVersion": "12.4.0-21693836646",
        "targets": [
          { "editorMode": "code", "expr": "sum(telemt_user_connections_current{user=\"$user\"}) by (user)", "format": "time_series", "legendFormat": "{{ user }}", "range": true, "refId": "A" }
        ],
        "title": "user_connections_current",
        "type": "timeseries"
      },
      {
        "datasource": { "type": "prometheus", "uid": "${datasource}" },
        "fieldConfig": {
          "defaults": {
            "color": { "mode": "palette-classic" },
            "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "showValues": false, "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } },
            "mappings": [],
            "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": 0 }, { "color": "red", "value": 80 } ] },
            "unit": "binBps"
          },
          "overrides": []
        },
        "gridPos": { "h": 8, "w": 12, "x": 0, "y": 18 },
        "id": 9,
        "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } },
        "pluginVersion": "12.4.0-21693836646",
        "targets": [
          { "editorMode": "code", "expr": "- sum(rate(telemt_user_octets_from_client{user=\"$user\"}[$__rate_interval])) by (user)", "format": "time_series", "legendFormat": "{{ user }} TX", "range": true, "refId": "A" },
          { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(rate(telemt_user_octets_to_client{user=\"$user\"}[$__rate_interval])) by (user)", "format": "time_series", "legendFormat": "{{ user }} RX", "range": true, "refId": "B" }
        ],
        "title": "user_octets",
        "type": "timeseries"
      },
      {
        "datasource": { "type": "prometheus", "uid": "${datasource}" },
        "fieldConfig": {
          "defaults": {
            "color": { "mode": "palette-classic" },
            "custom": { "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "showValues": false, "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } },
            "mappings": [],
            "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": 0 }, { "color": "red", "value": 80 } ] },
            "unit": "pps"
          },
          "overrides": []
        },
        "gridPos": { "h": 8, "w": 12, "x": 12, "y": 18 },
        "id": 10,
        "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": false }, "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } },
        "pluginVersion": "12.4.0-21693836646",
        "targets": [
          { "editorMode": "code", "expr": "- sum(rate(telemt_user_msgs_from_client{user=\"$user\"}[$__rate_interval])) by (user)", "format": "time_series", "legendFormat": "{{ user }} TX", "range": true, "refId": "A" },
          { "datasource": { "type": "prometheus", "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(rate(telemt_user_msgs_to_client{user=\"$user\"}[$__rate_interval])) by (user)", "format": "time_series", "legendFormat": "{{ user }} RX", "range": true, "refId": "B" }
        ],
        "title": "user_msgs",
        "type": "timeseries"
      }
    ],
    "preload": false,
    "schemaVersion": 42,
    "tags": [],
    "templating": {
      "list": [
        {
          "current": { "text": "docker", "value": "docker" },
          "datasource": { "type": "prometheus", "uid": "${datasource}" },
          "definition": "label_values(telemt_user_connections_total,user)",
          "hide": 2,
          "multi": true,
          "name": "user",
          "options": [],
          "query": { "qryType": 1, "query": "label_values(telemt_user_connections_total,user)", "refId": "VariableQueryEditor-VariableQuery" },
          "refresh": 1,
          "regex": "",
          "regexApplyTo": "value",
          "sort": 1,
          "type": "query"
        },
        {
          "current": { "text": "VM long-term", "value": "P7D3016A027385E71" },
          "name": "datasource",
          "options": [],
          "query": "prometheus",
          "refresh": 1,
          "regex": "",
          "type": "datasource"
        }
      ]
    },
    "time": { "from": "now-6h", "to": "now" },
    "timepicker": {},
    "timezone": "browser",
    "title": "Telemt MtProto proxy",
    "weekStart": ""
  }
}
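The dashboard export above uses the resource-style wrapper (kind Dashboard, with the actual dashboard model under spec) and only references telemt_* metrics through the ${datasource} and $user template variables. One way to load it into a running Grafana is the classic HTTP import endpoint, posting just the spec object; the sketch below is an assumption about how this particular export maps onto that endpoint, and the URL, token, and timeout values are placeholders, not part of the change.

# Hypothetical import sketch for tools/grafana-dashboard.json.
# Assumes a reachable Grafana instance and a service-account token with
# dashboard write permissions; only the "spec" part of the wrapper is sent.
import json
from pathlib import Path

import requests

GRAFANA_URL = "http://localhost:3000"  # placeholder
API_TOKEN = "glsa_..."                 # placeholder service-account token

resource = json.loads(Path("tools/grafana-dashboard.json").read_text(encoding="utf-8"))
payload = {"dashboard": resource["spec"], "overwrite": True}

resp = requests.post(
    f"{GRAFANA_URL}/api/dashboards/db",
    headers={"Authorization": f"Bearer {API_TOKEN}", "Content-Type": "application/json"},
    data=json.dumps(payload),
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # reports the uid/url Grafana assigned to the imported dashboard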