62 Commits

Author SHA1 Message Date
Alexey
a80db2ddbc Merge pull request #81 from telemt/3.0.0
3.0.0 Integration
2026-02-15 14:18:44 +03:00
Alexey
0694183ca6 Num_bigint + Num_traits Fix 2026-02-15 14:15:56 +03:00
Alexey
1f9fb29a9b Update config.toml
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-02-15 14:07:16 +03:00
Alexey
eccc69b79c Merge branch '3.0.0' of https://github.com/telemt/telemt into 3.0.0 2026-02-15 14:02:15 +03:00
Alexey
da108b2d8c Middle Proxy läuft wie auf Schienen...
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-02-15 14:02:00 +03:00
Alexey
9d94f55cdc Update Cargo.toml 2026-02-15 13:20:19 +03:00
Alexey
94a7058cc6 Middle Proxy Minimal
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-02-15 13:14:50 +03:00
Alexey
3d2e996cea Delete telemt
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-02-15 12:35:23 +03:00
Alexey
f2455c9cb1 Middle-End Drafts
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-02-15 12:30:40 +03:00
Alexey
427c7dd375 Deprecated failed KDF
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-02-15 12:29:34 +03:00
Alexey
e911a21a93 New hash in tests
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-02-15 12:29:08 +03:00
Alexey
edabad87d7 Merge pull request #78 from artemws/main
Disable color logs
2026-02-15 11:28:40 +03:00
artemws
2a65d29e3b Configure color output based on user settings
Added conditional color output configuration for logging.
2026-02-15 10:12:56 +02:00
artemws
c837a9b0c6 Add disable_colors field to GeneralConfig
Add option to disable colored output in logs
2026-02-15 10:12:33 +02:00
Alexey
f7618416b6 Merge pull request #77 from telemt/revert-68-unix-socket
Revert "Unix socket listener + reverse proxy improvements"
2026-02-15 10:09:13 +03:00
Alexey
0663e71c52 Revert "Unix socket listener + reverse proxy improvements" 2026-02-15 10:09:03 +03:00
Alexey
0599a6ec8c Merge pull request #76 from telemt/revert-72-main-fix
Revert "Main fix"
2026-02-15 10:08:34 +03:00
Alexey
b2d36aac19 Revert "Main fix" 2026-02-15 10:08:20 +03:00
Alexey
3d88ec5992 Merge pull request #74 from telemt/codeql-tuning
Update codeql.yml
2026-02-15 03:36:53 +03:00
Alexey
a693ed1e33 Merge pull request #72 from telemt/main-fix
Main fix
2026-02-15 03:36:25 +03:00
Alexey
911a504e16 Update main.rs 2026-02-15 03:34:24 +03:00
Alexey
56cd0cd1a9 Update client.rs 2026-02-15 03:27:53 +03:00
Alexey
358ad65d5f Update client.rs 2026-02-15 03:24:20 +03:00
Alexey
2f5df6ade0 Update codeql.yml 2026-02-15 03:20:19 +03:00
Alexey
e3b7be81e7 Update main.rs 2026-02-15 03:18:40 +03:00
Alexey
9a25e8e810 Update client.rs 2026-02-15 03:17:45 +03:00
Alexey
1a6b39b829 Merge pull request #68 from Katze-942/unix-socket
Unix socket listener + reverse proxy improvements
2026-02-15 02:48:39 +03:00
Alexey
a419cbbcf3 Merge branch 'main' into unix-socket 2026-02-15 02:48:24 +03:00
Alexey
b97ea1293b Merge pull request #69 from artemws/main
Unique IP address restrict for users
2026-02-15 00:24:20 +03:00
artemws
5f54eb8270 Comment out user_max_unique_ips setting
Comment out user_max_unique_ips configuration
2026-02-14 23:04:15 +02:00
artemws
06161abbbc Implement IP tracking and user limit checks
Added IP tracking and cleanup functionality for users.
2026-02-14 23:02:16 +02:00
artemws
aee549f745 Integrate IP Tracker for user IP management
Added UserIpTracker for managing user IP limits.
2026-02-14 23:01:43 +02:00
artemws
50ec753c05 Add user_max_unique_ips to configuration 2026-02-14 23:01:09 +02:00
artemws
cf34c7e75c Add files via upload 2026-02-14 23:00:26 +02:00
Жора Змейкин
572e07a7fd Unix socket listener + reverse proxy improvements 2026-02-14 23:29:39 +03:00
Alexey
4b5270137b Merge pull request #67 from telemt/main-dc-overrides
Bumped version + DC Overrides
2026-02-14 22:47:33 +03:00
Alexey
246230c924 Bumped version + DC Overrides 2026-02-14 22:46:00 +03:00
Alexey
21416af153 Merge pull request #66 from telemt/2.0.0.0-build
2.0.0.0 Build, Closing Branch
2026-02-14 22:34:13 +03:00
Alexey
b03312fa2e Merge pull request #65 from telemt/2.0.0.0-h
2.0.0.1
2026-02-14 22:20:43 +03:00
Alexey
bcdbf033b2 Delete middle_proxy.rs 2026-02-14 22:15:41 +03:00
Alexey
0a054c4a01 Find DC Method in Python
Co-Authored-By: artemws <59208085+artemws@users.noreply.github.com>
2026-02-14 21:55:29 +03:00
Alexey
eae7ad43d9 Merge pull request #63 from telemt/main-emergency
Update README.md
2026-02-14 20:40:03 +03:00
Alexey
0894ef0089 Update README.md 2026-02-14 20:39:34 +03:00
Alexey
954916960b Merge pull request #62 from telemt/main-emergency
Update README.md
2026-02-14 20:36:23 +03:00
Alexey
91d16b96ee Update README.md 2026-02-14 20:35:54 +03:00
Alexey
4bbadbc764 Merge pull request #41 from vmax/feature/show-all-links
feature: support show_links = "*"
2026-02-14 18:29:05 +03:00
Alexey
e4272ac35c Merge pull request #44 from telemt/dependabot/cargo/lru-0.16.3
Bump lru from 0.12.5 to 0.16.3
2026-02-14 13:26:34 +03:00
Alexey
46ee91c6b7 File descriptor limits for systemd: merge pull request #57 from sou1jacker/main
"Too many open files" - add file descriptor limits for systemd & Docker (fixes telemt#56)
2026-02-14 12:37:31 +03:00
Артур
ad553f8fbb docs: add ulimits to docker-compose.yml (fixes #56) 2026-02-14 01:59:30 +03:00
Артур
c0b4129209 docs: add file descriptor limits for systemd and Docker (fixes #56) 2026-02-14 01:51:29 +03:00
Max Vorobev
fc47e4d584 feature: support show_links = "*" 2026-02-14 01:02:47 +03:00
Alexey
32b16439c8 Merge pull request #55 from telemt/katze-942-ipv6
Update config.toml
2026-02-13 23:47:38 +03:00
Alexey
fd27449a26 Update config.toml 2026-02-13 23:47:26 +03:00
Alexey
3d13301711 Added Docker support, updated README.md: merge pull request #54 from sou1jacker/main
Added Docker support, updated README.md
2026-02-13 21:37:37 +03:00
sou1jacker
963ec7206b Added Docker support, updated README.md 2026-02-13 21:19:23 +03:00
Alexey
9047511256 Merge pull request #46 from telemt/codeql-tuning
CodeQL Fixes
2026-02-13 03:40:55 +03:00
Alexey
4ba907fdcd CodeQL Fixes 2026-02-13 03:39:59 +03:00
Alexey
dae19c29a0 Merge pull request #45 from telemt/codeql-tuning-1
Update codeql-config.yml
2026-02-13 03:37:09 +03:00
Alexey
25530c8c44 Update codeql-config.yml 2026-02-13 03:36:51 +03:00
dependabot[bot]
aee44d3af2 Bump lru from 0.12.5 to 0.16.3
Bumps [lru](https://github.com/jeromefroe/lru-rs) from 0.12.5 to 0.16.3.
- [Changelog](https://github.com/jeromefroe/lru-rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jeromefroe/lru-rs/compare/0.12.5...0.16.3)

---
updated-dependencies:
- dependency-name: lru
  dependency-version: 0.16.3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-02-13 00:31:52 +00:00
Alexey
714d83bea1 Merge pull request #43 from telemt/codeql-tuning
Updated codeql-config.yml
2026-02-13 03:11:21 +03:00
Alexey
e1bfe69b76 Updated codeql-config.yml 2026-02-13 03:11:02 +03:00
35 changed files with 1584 additions and 1141 deletions

.github/codeql/codeql-config.yml (new file)

@@ -0,0 +1,19 @@
name: "Rust without tests"
disable-default-queries: false
queries:
- uses: security-extended
- uses: security-and-quality
- uses: ./.github/codeql/queries
query-filters:
- exclude:
id:
- rust/unwrap-on-option
- rust/unwrap-on-result
- rust/expect-used
analysis:
dataflow:
default-precision: high

codeql.yml

@@ -2,9 +2,9 @@ name: "CodeQL Advanced"
on:
push:
branches: [ "main" ]
branches: [ "*" ]
pull_request:
branches: [ "main" ]
branches: [ "*" ]
schedule:
- cron: '0 0 * * 0'

Cargo.lock (generated)

@@ -437,6 +437,12 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
[[package]]
name = "foldhash"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
[[package]]
name = "form_urlencoded"
version = "1.2.2"
@@ -608,9 +614,7 @@ version = "0.15.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
dependencies = [
"allocator-api2",
"equivalent",
"foldhash",
"foldhash 0.1.5",
]
[[package]]
@@ -618,6 +622,11 @@ name = "hashbrown"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
dependencies = [
"allocator-api2",
"equivalent",
"foldhash 0.2.0",
]
[[package]]
name = "heck"
@@ -999,11 +1008,11 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
[[package]]
name = "lru"
version = "0.12.5"
version = "0.16.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
checksum = "a1dc47f592c06f33f8e3aea9591776ec7c9f9e4124778ff8a3c3b87159f7e593"
dependencies = [
"hashbrown 0.15.5",
"hashbrown 0.16.1",
]
[[package]]

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "telemt"
version = "1.2.0"
version = "3.0.0"
edition = "2024"
[dependencies]
@@ -37,7 +37,7 @@ tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
parking_lot = "0.12"
dashmap = "5.5"
lru = "0.12"
lru = "0.16"
rand = "0.9"
chrono = { version = "0.4", features = ["serde"] }
hex = "0.4"
@@ -45,6 +45,8 @@ base64 = "0.22"
url = "2.5"
regex = "1.11"
crossbeam-queue = "0.3"
num-bigint = "0.4"
num-traits = "0.2"
# HTTP
reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
@@ -57,4 +59,4 @@ futures = "0.3"
[[bench]]
name = "crypto_bench"
harness = false
harness = false

Dockerfile (new file)

@@ -0,0 +1,42 @@
# ==========================
# Stage 1: Build
# ==========================
FROM rust:1.85-slim-bookworm AS builder
RUN apt-get update && apt-get install -y --no-install-recommends \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /build
COPY Cargo.toml Cargo.lock* ./
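# Cache the dependency build: compile once against a stub main.rs so that source-only changes reuse this layer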
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
cargo build --release 2>/dev/null || true && \
rm -rf src
COPY . .
RUN cargo build --release && strip target/release/telemt
# ==========================
# Stage 2: Runtime
# ==========================
FROM debian:bookworm-slim
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
RUN useradd -r -s /usr/sbin/nologin telemt
WORKDIR /app
COPY --from=builder /build/target/release/telemt /app/telemt
COPY config.toml /app/config.toml
RUN chown -R telemt:telemt /app
USER telemt
EXPOSE 443
ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]

README.md

@@ -3,26 +3,44 @@
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algorithm and adds many production-ready improvements such as connection pooling, replay protection, detailed statistics, and masking from "prying" eyes.
## Emergency
**Важное сообщение для пользователей из России**
### RU
Многие из вас столкнулись с проблемой загрузки медиа из каналов с >100k subs...
Мы работаем над проектом с Нового года и сейчас готовим новый релиз - 1.2
Мы уже знаем о проблеме: она связана с dc=203 - Telegram CDN и сейчас есть подтверждённое исправление...
В нём имплементируется поддержка Middle Proxy Protocol - основного терминатора для Ad Tag:
работа над ним идёт с 6 февраля, а уже 10 февраля произошли "громкие события"...
🤐 ДОСТУПНО ТОЛЬКО В РЕЛИЗЕ 2.0.0.1 и последующих
Если у вас есть компетенции в асинхронных сетевых приложениях - мы открыты к предложениям и pull requests
Сейчас оно применимо через добавление в конфиг:
```toml
[dc_overrides]
"203" = "91.105.192.100:443"
```
Мы работаем над поиском всех адресов для каждого "нестандартного" DC...
**Important message for users from Russia**
Фикс вне конфига будет в релизе 2.0.0.2
We've been working on the project since December 30 and are currently preparing a new release 1.2
Если у вас есть компетенции в асинхронных сетевых приложениях, анализе трафика, reverse engineering, network forensics - мы открыты к мыслям, предложениям, pull requests
It implements support for the Middle Proxy Protocol, the primary point for the Ad Tag:
development on it started on February 6th, and by February 10th, "big activity" in Russia had already "taken place"...
### EN
Many of you have encountered issues loading media from channels with over 100k subscribers…
If you have expertise in asynchronous network applications we are open to ideas and pull requests!
We're already aware of the problem: it's related to the `dc=203` Telegram CDN, and we now have a confirmed fix.
🤐 AVAILABLE ONLY IN RELEASE 2.0.0.1 and later
Currently, you can apply it by adding the following to your config:
```toml
[dc_overrides]
"203" = "91.105.192.100:443"
```
We're working on identifying all addresses for every “nonstandard” DC…
The fix will be included in release 2.0.0.2, no manual config needed.
If you have expertise in asynchronous network applications, traffic analysis, reverse engineering, or network forensics, we're open to ideas, suggestions, and pull requests.
# Features
💥 The configuration structure has changed since version 1.1.0.0, change it in your environment!
💥 The configuration structure has changed since version 1.1.0.0. change it in your environment!
⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, advanced and *almost* **"behaviorally consistent to real"**: we are confident we have it right - [see evidence on our validation and traces](#recognizability-for-dpi-and-crawler)
@@ -44,7 +62,9 @@ If you have expertise in asynchronous network applications we are open to id
- [Telegram Calls](#telegram-calls-via-mtproxy)
- [DPI](#how-does-dpi-see-mtproxy-tls)
- [Whitelist on Network Level](#whitelist-on-ip)
- [Too many open files](#too-many-open-files)
- [Build](#build)
- [Docker](#docker)
- [Why Rust?](#why-rust)
## Features
@@ -128,6 +148,7 @@ Type=simple
WorkingDirectory=/bin
ExecStart=/bin/telemt /etc/telemt.toml
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
@@ -377,6 +398,23 @@ Keep-Alive: timeout=60
- in China behind the Great Firewall
- in Russia on mobile networks, less in wired networks
- in Iran during "activity"
### Too many open files
- On a fresh Linux install the default open file limit is low; under load `telemt` may fail with `Accept error: Too many open files`
- **Systemd**: add `LimitNOFILE=65536` to the `[Service]` section (already included in the example above)
- **Docker**: add `--ulimit nofile=65536:65536` to your `docker run` command, or in `docker-compose.yml`:
```yaml
ulimits:
  nofile:
    soft: 65536
    hard: 65536
```
- **System-wide** (optional): add to `/etc/security/limits.conf`:
```
* soft nofile 1048576
* hard nofile 1048576
root soft nofile 1048576
root hard nofile 1048576
```
## Build
@@ -395,9 +433,44 @@ chmod +x /bin/telemt
telemt config.toml
```
## Docker
**Quick start (Docker Compose)**
1. Edit `config.toml` in the repo root (at least: port, user secrets, tls_domain)
2. Start container:
```bash
docker compose up -d --build
```
3. Check logs:
```bash
docker compose logs -f telemt
```
4. Stop:
```bash
docker compose down
```
**Notes**
- `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only)
- By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added)
- If you really need host networking (usually only for some IPv6 setups) uncomment `network_mode: host`
**Run without Compose**
```bash
docker build -t telemt:local .
docker run --name telemt --restart unless-stopped \
-p 443:443 \
-e RUST_LOG=info \
-v "$PWD/config.toml:/app/config.toml:ro" \
--read-only \
--cap-drop ALL --cap-add NET_BIND_SERVICE \
--ulimit nofile=65536:65536 \
telemt:local
```
## Why Rust?
- Long-running reliability and idempotent behavior
- Rusts deterministic resource management - RAII
- Rust's deterministic resource management - RAII
- No garbage collector
- Memory safety and reduced attack surface
- Tokio's asynchronous architecture

config.toml

@@ -4,10 +4,10 @@ show_link = ["hello"]
# === General Settings ===
[general]
prefer_ipv6 = false
prefer_ipv6 = true
fast_mode = true
use_middle_proxy = true
ad_tag = "00000000000000000000000000000000"
#ad_tag = "00000000000000000000000000000000"
# Log level: debug | verbose | normal | silent
# Can be overridden with --silent or --log-level CLI flags
@@ -64,6 +64,9 @@ hello = "00000000000000000000000000000000"
# [access.user_max_tcp_conns]
# hello = 50
# [access.user_max_unique_ips]
# hello = 5
# [access.user_data_quota]
# hello = 1073741824 # 1 GB
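For reference, a minimal sketch (illustrative values, not recommendations) of how the options touched above can be enabled for the sample user `hello` from the bundled config, combining the new `show_link = "*"` form with a per-user unique-IP limit:

```toml
# Show startup proxy links for every configured user
show_link = "*"

[access.user_max_unique_ips]
# Allow "hello" to be connected from at most 5 distinct IP addresses at once
hello = 5
```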

docker-compose.yml (new file)

@@ -0,0 +1,24 @@
services:
  telemt:
    build: .
    container_name: telemt
    restart: unless-stopped
    ports:
      - "443:443"
    volumes:
      - ./config.toml:/app/config.toml:ro
    environment:
      - RUST_LOG=info
    # Uncomment this line if you want to use host network for IPv6, but bridge is default and usually better
    # network_mode: host
    cap_drop:
      - ALL
    cap_add:
      - NET_BIND_SERVICE # allow binding to port 443
    read_only: true
    security_opt:
      - no-new-privileges:true
    ulimits:
      nofile:
        soft: 65536
        hard: 65536

View File

@@ -4,8 +4,9 @@ use crate::error::{ProxyError, Result};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::net::IpAddr;
use std::path::Path;
use tracing::warn;
// ============= Helper Defaults =============
@@ -164,6 +165,10 @@ pub struct GeneralConfig {
#[serde(default)]
pub log_level: LogLevel,
/// Disable colored output in logs (useful for files/systemd)
#[serde(default)]
pub disable_colors: bool,
}
impl Default for GeneralConfig {
@@ -179,6 +184,7 @@ impl Default for GeneralConfig {
middle_proxy_nat_probe: false,
middle_proxy_nat_stun: None,
log_level: LogLevel::Normal,
disable_colors: false,
}
}
}
@@ -295,6 +301,9 @@ pub struct AccessConfig {
#[serde(default)]
pub user_data_quota: HashMap<String, u64>,
#[serde(default)]
pub user_max_unique_ips: HashMap<String, usize>,
#[serde(default = "default_replay_check_len")]
pub replay_check_len: usize,
@@ -317,6 +326,7 @@ impl Default for AccessConfig {
user_max_tcp_conns: HashMap::new(),
user_expirations: HashMap::new(),
user_data_quota: HashMap::new(),
user_max_unique_ips: HashMap::new(),
replay_check_len: default_replay_check_len(),
replay_window_secs: default_replay_window_secs(),
ignore_time_skew: false,
@@ -368,6 +378,101 @@ pub struct ListenerConfig {
pub announce_ip: Option<IpAddr>,
}
// ============= ShowLink =============
/// Controls which users' proxy links are displayed at startup.
///
/// In TOML, this can be:
/// - `show_link = "*"` — show links for all users
/// - `show_link = ["a", "b"]` — show links for specific users
/// - omitted — show no links (default)
#[derive(Debug, Clone)]
pub enum ShowLink {
/// Don't show any links (default when omitted)
None,
/// Show links for all configured users
All,
/// Show links for specific users
Specific(Vec<String>),
}
impl Default for ShowLink {
fn default() -> Self {
ShowLink::None
}
}
impl ShowLink {
/// Returns true if no links should be shown
pub fn is_empty(&self) -> bool {
matches!(self, ShowLink::None) || matches!(self, ShowLink::Specific(v) if v.is_empty())
}
/// Resolve the list of user names to display, given all configured users
pub fn resolve_users<'a>(&'a self, all_users: &'a HashMap<String, String>) -> Vec<&'a String> {
match self {
ShowLink::None => vec![],
ShowLink::All => {
let mut names: Vec<&String> = all_users.keys().collect();
names.sort();
names
}
ShowLink::Specific(names) => names.iter().collect(),
}
}
}
impl Serialize for ShowLink {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
match self {
ShowLink::None => Vec::<String>::new().serialize(serializer),
ShowLink::All => serializer.serialize_str("*"),
ShowLink::Specific(v) => v.serialize(serializer),
}
}
}
impl<'de> Deserialize<'de> for ShowLink {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
use serde::de;
struct ShowLinkVisitor;
impl<'de> de::Visitor<'de> for ShowLinkVisitor {
type Value = ShowLink;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str(r#""*" or an array of user names"#)
}
fn visit_str<E: de::Error>(self, v: &str) -> std::result::Result<ShowLink, E> {
if v == "*" {
Ok(ShowLink::All)
} else {
Err(de::Error::invalid_value(
de::Unexpected::Str(v),
&r#""*""#,
))
}
}
fn visit_seq<A: de::SeqAccess<'de>>(self, mut seq: A) -> std::result::Result<ShowLink, A::Error> {
let mut names = Vec::new();
while let Some(name) = seq.next_element::<String>()? {
names.push(name);
}
if names.is_empty() {
Ok(ShowLink::None)
} else {
Ok(ShowLink::Specific(names))
}
}
}
deserializer.deserialize_any(ShowLinkVisitor)
}
}
// ============= Main Config =============
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
@@ -391,7 +496,7 @@ pub struct ProxyConfig {
pub upstreams: Vec<UpstreamConfig>,
#[serde(default)]
pub show_link: Vec<String>,
pub show_link: ShowLink,
/// DC address overrides for non-standard DCs (CDN, media, test, etc.)
/// Keys are DC indices as strings, values are "ip:port" addresses.
@@ -513,6 +618,16 @@ impl ProxyConfig {
)));
}
if let Some(tag) = &self.general.ad_tag {
let zeros = "00000000000000000000000000000000";
if tag == zeros {
warn!("ad_tag is all zeros; register a valid proxy tag via @MTProxybot to enable sponsored channel");
}
if tag.len() != 32 || tag.chars().any(|c| !c.is_ascii_hexdigit()) {
warn!("ad_tag is not a 32-char hex string; ensure you use value issued by @MTProxybot");
}
}
Ok(())
}
}

View File

@@ -172,7 +172,7 @@ mod tests {
let digest = sha256(&prekey);
assert_eq!(
hex::encode(digest),
"a4595b75f1f610f2575ace802ddc65c91b5acef3b0e0d18189e0c7c9f787d15c"
"934f5facdafd65a44d5c2df90d2f35ddc81faaaeb337949dfeef817c8a7c1e00"
);
}
}

src/ip_tracker.rs (new file)

@@ -0,0 +1,462 @@
// src/ip_tracker.rs
// Module for tracking and limiting users' unique IP addresses
use std::collections::{HashMap, HashSet};
use std::net::IpAddr;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Tracker of unique IP addresses for each MTProxy user
///
/// Provides a thread-safe mechanism for:
/// - Tracking each user's active IP addresses
/// - Limiting the number of unique IPs per user
/// - Automatic cleanup when clients disconnect
#[derive(Debug, Clone)]
pub struct UserIpTracker {
/// Mapping: user name -> set of active IP addresses
active_ips: Arc<RwLock<HashMap<String, HashSet<IpAddr>>>>,
/// Mapping: user name -> maximum allowed number of unique IPs
max_ips: Arc<RwLock<HashMap<String, usize>>>,
}
impl UserIpTracker {
/// Create a new, empty tracker
pub fn new() -> Self {
Self {
active_ips: Arc::new(RwLock::new(HashMap::new())),
max_ips: Arc::new(RwLock::new(HashMap::new())),
}
}
/// Set the unique-IP limit for a specific user
///
/// # Arguments
/// * `username` - User name
/// * `max_ips` - Maximum number of simultaneously active IP addresses
pub async fn set_user_limit(&self, username: &str, max_ips: usize) {
let mut limits = self.max_ips.write().await;
limits.insert(username.to_string(), max_ips);
}
/// Load limits from the configuration
///
/// # Arguments
/// * `limits` - HashMap with the limits from config.toml
pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
let mut max_ips = self.max_ips.write().await;
for (user, limit) in limits {
max_ips.insert(user.clone(), *limit);
}
}
/// Check whether the user may connect from the given IP address
/// and add the IP to the active set if the check passes
///
/// # Arguments
/// * `username` - User name
/// * `ip` - Client IP address
///
/// # Returns
/// * `Ok(())` - Connection allowed, IP added to the active set
/// * `Err(String)` - Connection rejected, with the reason
pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
// Look up the user's limit
let max_ips = self.max_ips.read().await;
let limit = match max_ips.get(username) {
Some(limit) => *limit,
None => {
// No limit configured: allow unlimited access
drop(max_ips);
let mut active_ips = self.active_ips.write().await;
let user_ips = active_ips
.entry(username.to_string())
.or_insert_with(HashSet::new);
user_ips.insert(ip);
return Ok(());
}
};
drop(max_ips);
// Check and update the active IPs
let mut active_ips = self.active_ips.write().await;
let user_ips = active_ips
.entry(username.to_string())
.or_insert_with(HashSet::new);
// IP already in the set: this is a reconnection, allow it
if user_ips.contains(&ip) {
return Ok(());
}
// Check whether the limit would be exceeded
if user_ips.len() >= limit {
return Err(format!(
"IP limit reached for user '{}': {}/{} unique IPs already connected",
username,
user_ips.len(),
limit
));
}
// Limit not exceeded: add the new IP
user_ips.insert(ip);
Ok(())
}
/// Remove an IP address from the active set when a client disconnects
///
/// # Arguments
/// * `username` - User name
/// * `ip` - IP address of the disconnected client
pub async fn remove_ip(&self, username: &str, ip: IpAddr) {
let mut active_ips = self.active_ips.write().await;
if let Some(user_ips) = active_ips.get_mut(username) {
user_ips.remove(&ip);
// If the user has no active IPs left, drop the entry
// to save memory
if user_ips.is_empty() {
active_ips.remove(username);
}
}
}
/// Get the current number of active IP addresses for a user
///
/// # Arguments
/// * `username` - User name
///
/// # Returns
/// Number of unique active IP addresses
pub async fn get_active_ip_count(&self, username: &str) -> usize {
let active_ips = self.active_ips.read().await;
active_ips
.get(username)
.map(|ips| ips.len())
.unwrap_or(0)
}
/// Get the list of all active IP addresses for a user
///
/// # Arguments
/// * `username` - User name
///
/// # Returns
/// Vector of active IP addresses
pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
let active_ips = self.active_ips.read().await;
active_ips
.get(username)
.map(|ips| ips.iter().copied().collect())
.unwrap_or_else(Vec::new)
}
/// Get statistics for all users
///
/// # Returns
/// Vector of tuples: (user_name, active_ip_count, limit)
pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
let active_ips = self.active_ips.read().await;
let max_ips = self.max_ips.read().await;
let mut stats = Vec::new();
// Collect statistics for users with active connections
for (username, user_ips) in active_ips.iter() {
let limit = max_ips.get(username).copied().unwrap_or(0);
stats.push((username.clone(), user_ips.len(), limit));
}
stats.sort_by(|a, b| a.0.cmp(&b.0)); // Sort by user name
stats
}
/// Clear all active IPs for a user (when needed)
///
/// # Arguments
/// * `username` - User name
pub async fn clear_user_ips(&self, username: &str) {
let mut active_ips = self.active_ips.write().await;
active_ips.remove(username);
}
/// Clear all statistics (use with caution!)
pub async fn clear_all(&self) {
let mut active_ips = self.active_ips.write().await;
active_ips.clear();
}
/// Check whether the user is connected from the given IP
///
/// # Arguments
/// * `username` - User name
/// * `ip` - IP address to check
///
/// # Returns
/// `true` if the IP is active, `false` otherwise
pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
let active_ips = self.active_ips.read().await;
active_ips
.get(username)
.map(|ips| ips.contains(&ip))
.unwrap_or(false)
}
/// Get the limit for a user
///
/// # Arguments
/// * `username` - User name
///
/// # Returns
/// The IP-address limit, or None if no limit is set
pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
let max_ips = self.max_ips.read().await;
max_ips.get(username).copied()
}
/// Format the statistics as human-readable text
///
/// # Returns
/// A string with statistics for logs or monitoring
pub async fn format_stats(&self) -> String {
let stats = self.get_stats().await;
if stats.is_empty() {
return String::from("No active users");
}
let mut output = String::from("User IP Statistics:\n");
output.push_str("==================\n");
for (username, active_count, limit) in stats {
output.push_str(&format!(
"User: {:<20} Active IPs: {}/{}\n",
username,
active_count,
if limit > 0 { limit.to_string() } else { "unlimited".to_string() }
));
let ips = self.get_active_ips(&username).await;
for ip in ips {
output.push_str(&format!(" └─ {}\n", ip));
}
}
output
}
}
impl Default for UserIpTracker {
fn default() -> Self {
Self::new()
}
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
use super::*;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
fn test_ipv4(oct1: u8, oct2: u8, oct3: u8, oct4: u8) -> IpAddr {
IpAddr::V4(Ipv4Addr::new(oct1, oct2, oct3, oct4))
}
fn test_ipv6() -> IpAddr {
IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1))
}
#[tokio::test]
async fn test_basic_ip_limit() {
let tracker = UserIpTracker::new();
tracker.set_user_limit("test_user", 2).await;
let ip1 = test_ipv4(192, 168, 1, 1);
let ip2 = test_ipv4(192, 168, 1, 2);
let ip3 = test_ipv4(192, 168, 1, 3);
// The first two IPs should be accepted
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
// The third IP should be rejected
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
// Check the counter
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
}
#[tokio::test]
async fn test_reconnection_from_same_ip() {
let tracker = UserIpTracker::new();
tracker.set_user_limit("test_user", 2).await;
let ip1 = test_ipv4(192, 168, 1, 1);
// First connection
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
// A reconnection from the same IP should succeed
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
// The counter should not increase
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
}
#[tokio::test]
async fn test_ip_removal() {
let tracker = UserIpTracker::new();
tracker.set_user_limit("test_user", 2).await;
let ip1 = test_ipv4(192, 168, 1, 1);
let ip2 = test_ipv4(192, 168, 1, 2);
let ip3 = test_ipv4(192, 168, 1, 3);
// Add two IPs
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
// The third should not pass
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
// Remove the first IP
tracker.remove_ip("test_user", ip1).await;
// Now the third should pass
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
}
#[tokio::test]
async fn test_no_limit() {
let tracker = UserIpTracker::new();
// No limit is set for test_user
let ip1 = test_ipv4(192, 168, 1, 1);
let ip2 = test_ipv4(192, 168, 1, 2);
let ip3 = test_ipv4(192, 168, 1, 3);
// Without a limit, all IPs should pass
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
assert!(tracker.check_and_add("test_user", ip3).await.is_ok());
assert_eq!(tracker.get_active_ip_count("test_user").await, 3);
}
#[tokio::test]
async fn test_multiple_users() {
let tracker = UserIpTracker::new();
tracker.set_user_limit("user1", 2).await;
tracker.set_user_limit("user2", 1).await;
let ip1 = test_ipv4(192, 168, 1, 1);
let ip2 = test_ipv4(192, 168, 1, 2);
// user1 may use 2 IPs
assert!(tracker.check_and_add("user1", ip1).await.is_ok());
assert!(tracker.check_and_add("user1", ip2).await.is_ok());
// user2 may use only 1 IP
assert!(tracker.check_and_add("user2", ip1).await.is_ok());
assert!(tracker.check_and_add("user2", ip2).await.is_err());
}
#[tokio::test]
async fn test_ipv6_support() {
let tracker = UserIpTracker::new();
tracker.set_user_limit("test_user", 2).await;
let ipv4 = test_ipv4(192, 168, 1, 1);
let ipv6 = test_ipv6();
// Both address types should work
assert!(tracker.check_and_add("test_user", ipv4).await.is_ok());
assert!(tracker.check_and_add("test_user", ipv6).await.is_ok());
assert_eq!(tracker.get_active_ip_count("test_user").await, 2);
}
#[tokio::test]
async fn test_get_active_ips() {
let tracker = UserIpTracker::new();
tracker.set_user_limit("test_user", 3).await;
let ip1 = test_ipv4(192, 168, 1, 1);
let ip2 = test_ipv4(192, 168, 1, 2);
tracker.check_and_add("test_user", ip1).await.unwrap();
tracker.check_and_add("test_user", ip2).await.unwrap();
let active_ips = tracker.get_active_ips("test_user").await;
assert_eq!(active_ips.len(), 2);
assert!(active_ips.contains(&ip1));
assert!(active_ips.contains(&ip2));
}
#[tokio::test]
async fn test_stats() {
let tracker = UserIpTracker::new();
tracker.set_user_limit("user1", 3).await;
tracker.set_user_limit("user2", 2).await;
let ip1 = test_ipv4(192, 168, 1, 1);
let ip2 = test_ipv4(192, 168, 1, 2);
tracker.check_and_add("user1", ip1).await.unwrap();
tracker.check_and_add("user2", ip2).await.unwrap();
let stats = tracker.get_stats().await;
assert_eq!(stats.len(), 2);
// Check that both users appear in the statistics
assert!(stats.iter().any(|(name, _, _)| name == "user1"));
assert!(stats.iter().any(|(name, _, _)| name == "user2"));
}
#[tokio::test]
async fn test_clear_user_ips() {
let tracker = UserIpTracker::new();
let ip1 = test_ipv4(192, 168, 1, 1);
tracker.check_and_add("test_user", ip1).await.unwrap();
assert_eq!(tracker.get_active_ip_count("test_user").await, 1);
tracker.clear_user_ips("test_user").await;
assert_eq!(tracker.get_active_ip_count("test_user").await, 0);
}
#[tokio::test]
async fn test_is_ip_active() {
let tracker = UserIpTracker::new();
let ip1 = test_ipv4(192, 168, 1, 1);
let ip2 = test_ipv4(192, 168, 1, 2);
tracker.check_and_add("test_user", ip1).await.unwrap();
assert!(tracker.is_ip_active("test_user", ip1).await);
assert!(!tracker.is_ip_active("test_user", ip2).await);
}
#[tokio::test]
async fn test_load_limits_from_config() {
let tracker = UserIpTracker::new();
let mut config_limits = HashMap::new();
config_limits.insert("user1".to_string(), 5);
config_limits.insert("user2".to_string(), 3);
tracker.load_limits(&config_limits).await;
assert_eq!(tracker.get_user_limit("user1").await, Some(5));
assert_eq!(tracker.get_user_limit("user2").await, Some(3));
assert_eq!(tracker.get_user_limit("user3").await, None);
}
}

src/main.rs

@@ -13,6 +13,7 @@ mod cli;
mod config;
mod crypto;
mod error;
mod ip_tracker;
mod protocol;
mod proxy;
mod stats;
@@ -22,12 +23,14 @@ mod util;
use crate::config::{LogLevel, ProxyConfig};
use crate::crypto::SecureRandom;
use crate::ip_tracker::UserIpTracker;
use crate::proxy::ClientHandler;
use crate::stats::{ReplayChecker, Stats};
use crate::stream::BufferPool;
use crate::transport::middle_proxy::MePool;
use crate::transport::middle_proxy::{MePool, fetch_proxy_config};
use crate::transport::{ListenOptions, UpstreamManager, create_listener};
use crate::util::ip::detect_ip;
use crate::protocol::constants::{TG_MIDDLE_PROXIES_V4, TG_MIDDLE_PROXIES_V6};
fn parse_cli() -> (String, bool, Option<String>) {
let mut config_path = "config.toml".to_string();
@@ -131,13 +134,24 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
};
let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info"));
// Configure color output based on config
let fmt_layer = if config.general.disable_colors {
fmt::Layer::default().with_ansi(false)
} else {
fmt::Layer::default().with_ansi(true)
};
tracing_subscriber::registry()
.with(filter_layer)
.with(fmt::Layer::default())
.with(fmt_layer)
.init();
info!("Telemt MTProxy v{}", env!("CARGO_PKG_VERSION"));
info!("Log level: {}", effective_log_level);
if config.general.disable_colors {
info!("Colors: disabled");
}
info!(
"Modes: classic={} secure={} tls={}",
config.general.modes.classic, config.general.modes.secure, config.general.modes.tls
@@ -182,6 +196,14 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
let upstream_manager = Arc::new(UpstreamManager::new(config.upstreams.clone()));
let buffer_pool = Arc::new(BufferPool::with_config(16 * 1024, 4096));
// IP Tracker initialization
let ip_tracker = Arc::new(UserIpTracker::new());
ip_tracker.load_limits(&config.access.user_max_unique_ips).await;
if !config.access.user_max_unique_ips.is_empty() {
info!("IP limits configured for {} users", config.access.user_max_unique_ips.len());
}
// Connection concurrency limit
let _max_connections = Arc::new(Semaphore::new(10_000));
@@ -229,12 +251,34 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
"Proxy-secret loaded"
);
// Load ME config (v4/v6) + default DC
let mut cfg_v4 = fetch_proxy_config(
"https://core.telegram.org/getProxyConfig",
)
.await
.unwrap_or_default();
let mut cfg_v6 = fetch_proxy_config(
"https://core.telegram.org/getProxyConfigV6",
)
.await
.unwrap_or_default();
if cfg_v4.map.is_empty() {
cfg_v4.map = crate::protocol::constants::TG_MIDDLE_PROXIES_V4.clone();
}
if cfg_v6.map.is_empty() {
cfg_v6.map = crate::protocol::constants::TG_MIDDLE_PROXIES_V6.clone();
}
let pool = MePool::new(
proxy_tag,
proxy_secret,
config.general.middle_proxy_nat_ip,
config.general.middle_proxy_nat_probe,
config.general.middle_proxy_nat_stun.clone(),
cfg_v4.map.clone(),
cfg_v6.map.clone(),
cfg_v4.default_dc.or(cfg_v6.default_dc),
);
match pool.init(2, &rng).await {
@@ -251,6 +295,18 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
.await;
});
// Periodic updater: getProxyConfig + proxy-secret
let pool_clone2 = pool.clone();
let rng_clone2 = rng.clone();
tokio::spawn(async move {
crate::transport::middle_proxy::me_config_updater(
pool_clone2,
rng_clone2,
std::time::Duration::from_secs(12 * 3600),
)
.await;
});
Some(pool)
}
Err(e) => {
@@ -398,7 +454,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
if !config.show_link.is_empty() {
info!("--- Proxy Links ({}) ---", public_ip);
for user_name in &config.show_link {
for user_name in config.show_link.resolve_users(&config.access.users) {
if let Some(secret) = config.access.users.get(user_name) {
info!("User: {}", user_name);
if config.general.modes.classic {
@@ -458,6 +514,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
let buffer_pool = buffer_pool.clone();
let rng = rng.clone();
let me_pool = me_pool.clone();
let ip_tracker = ip_tracker.clone();
tokio::spawn(async move {
loop {
@@ -470,6 +527,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
let buffer_pool = buffer_pool.clone();
let rng = rng.clone();
let me_pool = me_pool.clone();
let ip_tracker = ip_tracker.clone();
tokio::spawn(async move {
if let Err(e) = ClientHandler::new(
@@ -482,6 +540,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
buffer_pool,
rng,
me_pool,
ip_tracker,
)
.run()
.await

View File

@@ -160,6 +160,12 @@ pub fn prepare_tg_nonce(
}
/// Encrypt the outgoing nonce for Telegram
/// Legacy helper — **do not use**.
/// WARNING: logic diverges from Python/C reference (SHA256 of 48 bytes, IV from head).
/// Kept only to avoid breaking external callers; prefer `encrypt_tg_nonce_with_ciphers`.
#[deprecated(
note = "Incorrect MTProto obfuscation KDF; use proxy::handshake::encrypt_tg_nonce_with_ciphers"
)]
pub fn encrypt_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
let key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
let enc_key = sha256(key_iv);
@@ -208,4 +214,4 @@ mod tests {
assert!(is_valid_nonce(&nonce));
assert_eq!(nonce.len(), HANDSHAKE_LEN);
}
}
}

View File

@@ -8,6 +8,8 @@ use crate::crypto::{sha256_hmac, SecureRandom};
use crate::error::{ProxyError, Result};
use super::constants::*;
use std::time::{SystemTime, UNIX_EPOCH};
use num_bigint::BigUint;
use num_traits::One;
// ============= Public Constants =============
@@ -311,13 +313,27 @@ pub fn validate_tls_handshake(
None
}
fn curve25519_prime() -> BigUint {
(BigUint::one() << 255) - BigUint::from(19u32)
}
/// Generate a fake X25519 public key for TLS
///
/// This generates random bytes that look like a valid X25519 public key.
/// Since we're not doing real TLS, the actual cryptographic properties don't matter.
/// Produces a quadratic residue mod p = 2^255 - 19 by computing n² mod p,
/// which matches Python/C behavior and avoids DPI fingerprinting.
pub fn gen_fake_x25519_key(rng: &SecureRandom) -> [u8; 32] {
let bytes = rng.bytes(32);
bytes.try_into().unwrap()
let mut n_bytes = [0u8; 32];
n_bytes.copy_from_slice(&rng.bytes(32));
let n = BigUint::from_bytes_le(&n_bytes);
let p = curve25519_prime();
let pk = (&n * &n) % &p;
let mut out = pk.to_bytes_le();
out.resize(32, 0);
let mut result = [0u8; 32];
result.copy_from_slice(&out[..32]);
result
}
/// Build TLS ServerHello response
@@ -498,6 +514,17 @@ mod tests {
assert_eq!(key2.len(), 32);
assert_ne!(key1, key2); // Should be random
}
#[test]
fn test_fake_x25519_key_is_quadratic_residue() {
let rng = SecureRandom::new();
let key = gen_fake_x25519_key(&rng);
let p = curve25519_prime();
let k_num = BigUint::from_bytes_le(&key);
let exponent = (&p - BigUint::one()) >> 1;
let legendre = k_num.modpow(&exponent, &p);
assert_eq!(legendre, BigUint::one());
}
#[test]
fn test_tls_extension_builder() {
@@ -641,4 +668,4 @@ mod tests {
// Should return None (no match) but not panic
assert!(result.is_none());
}
}
}
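A brief note on the math behind the new `test_fake_x25519_key_is_quadratic_residue` test above: by Euler's criterion, a nonzero k is a quadratic residue modulo the prime p = 2^255 - 19 exactly when

k^((p-1)/2) ≡ 1 (mod p)

so the test's `modpow(&exponent, &p)` evaluates precisely this power (the Legendre symbol), and because `gen_fake_x25519_key` returns n² mod p for a random n, the result is 1 by construction (barring the negligible case n ≡ 0 mod p).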

View File

@@ -11,6 +11,7 @@ use tracing::{debug, warn};
use crate::config::ProxyConfig;
use crate::crypto::SecureRandom;
use crate::error::{HandshakeResult, ProxyError, Result};
use crate::ip_tracker::UserIpTracker;
use crate::protocol::constants::*;
use crate::protocol::tls;
use crate::stats::{ReplayChecker, Stats};
@@ -35,6 +36,7 @@ pub struct RunningClientHandler {
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
me_pool: Option<Arc<MePool>>,
ip_tracker: Arc<UserIpTracker>,
}
impl ClientHandler {
@@ -48,6 +50,7 @@ impl ClientHandler {
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
me_pool: Option<Arc<MePool>>,
ip_tracker: Arc<UserIpTracker>,
) -> RunningClientHandler {
RunningClientHandler {
stream,
@@ -59,6 +62,7 @@ impl ClientHandler {
buffer_pool,
rng,
me_pool,
ip_tracker,
}
}
}
@@ -203,6 +207,8 @@ impl RunningClientHandler {
self.rng,
self.me_pool,
local_addr,
peer,
self.ip_tracker,
)
.await
}
@@ -261,6 +267,8 @@ impl RunningClientHandler {
self.rng,
self.me_pool,
local_addr,
peer,
self.ip_tracker,
)
.await
}
@@ -280,6 +288,8 @@ impl RunningClientHandler {
rng: Arc<SecureRandom>,
me_pool: Option<Arc<MePool>>,
local_addr: SocketAddr,
peer_addr: SocketAddr,
ip_tracker: Arc<UserIpTracker>,
) -> Result<()>
where
R: AsyncRead + Unpin + Send + 'static,
@@ -287,11 +297,36 @@ impl RunningClientHandler {
{
let user = &success.user;
if let Err(e) = Self::check_user_limits_static(user, &config, &stats) {
if let Err(e) = Self::check_user_limits_static(user, &config, &stats, peer_addr, &ip_tracker).await {
warn!(user = %user, error = %e, "User limit exceeded");
return Err(e);
}
// IP Cleanup Guard: automatically removes the IP when it goes out of scope
struct IpCleanupGuard {
tracker: Arc<UserIpTracker>,
user: String,
ip: std::net::IpAddr,
}
impl Drop for IpCleanupGuard {
fn drop(&mut self) {
let tracker = self.tracker.clone();
let user = self.user.clone();
let ip = self.ip;
tokio::spawn(async move {
tracker.remove_ip(&user, ip).await;
debug!(user = %user, ip = %ip, "IP cleaned up on disconnect");
});
}
}
let _cleanup = IpCleanupGuard {
tracker: ip_tracker,
user: user.clone(),
ip: peer_addr.ip(),
};
// Decide: middle proxy or direct
if config.general.use_middle_proxy {
if let Some(ref pool) = me_pool {
@@ -304,6 +339,7 @@ impl RunningClientHandler {
config,
buffer_pool,
local_addr,
rng,
)
.await;
}
@@ -324,7 +360,13 @@ impl RunningClientHandler {
.await
}
fn check_user_limits_static(user: &str, config: &ProxyConfig, stats: &Stats) -> Result<()> {
async fn check_user_limits_static(
user: &str,
config: &ProxyConfig,
stats: &Stats,
peer_addr: SocketAddr,
ip_tracker: &UserIpTracker,
) -> Result<()> {
if let Some(expiration) = config.access.user_expirations.get(user) {
if chrono::Utc::now() > *expiration {
return Err(ProxyError::UserExpired {
@@ -333,6 +375,19 @@ impl RunningClientHandler {
}
}
// IP limit check
if let Err(reason) = ip_tracker.check_and_add(user, peer_addr.ip()).await {
warn!(
user = %user,
ip = %peer_addr.ip(),
reason = %reason,
"IP limit exceeded"
);
return Err(ProxyError::ConnectionLimitExceeded {
user: user.to_string(),
});
}
if let Some(limit) = config.access.user_max_tcp_conns.get(user) {
if stats.get_user_curr_connects(user) >= *limit as u64 {
return Err(ProxyError::ConnectionLimitExceeded {

View File

@@ -139,6 +139,8 @@ async fn do_tg_handshake_static(
success.dc_idx,
&success.dec_key,
success.dec_iv,
&success.enc_key,
success.enc_iv,
rng,
config.general.fast_mode,
);

View File

@@ -70,7 +70,7 @@ where
let digest = &handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN];
let digest_half = &digest[..tls::TLS_DIGEST_HALF_LEN];
if replay_checker.check_tls_digest(digest_half) {
if replay_checker.check_and_add_tls_digest(digest_half) {
warn!(peer = %peer, "TLS replay attack detected (duplicate digest)");
return HandshakeResult::BadClient { reader, writer };
}
@@ -122,8 +122,6 @@ where
return HandshakeResult::Error(ProxyError::Io(e));
}
replay_checker.add_tls_digest(digest_half);
info!(
peer = %peer,
user = %validation.user,
@@ -155,7 +153,7 @@ where
let dec_prekey_iv = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
if replay_checker.check_handshake(dec_prekey_iv) {
if replay_checker.check_and_add_handshake(dec_prekey_iv) {
warn!(peer = %peer, "MTProto replay attack detected");
return HandshakeResult::BadClient { reader, writer };
}
@@ -216,8 +214,6 @@ where
let enc_iv = u128::from_be_bytes(enc_iv_bytes.try_into().unwrap());
replay_checker.add_handshake(dec_prekey_iv);
let encryptor = AesCtr::new(&enc_key, enc_iv);
let success = HandshakeSuccess {
@@ -256,8 +252,10 @@ where
pub fn generate_tg_nonce(
proto_tag: ProtoTag,
dc_idx: i16,
client_dec_key: &[u8; 32],
client_dec_iv: u128,
_client_dec_key: &[u8; 32],
_client_dec_iv: u128,
client_enc_key: &[u8; 32],
client_enc_iv: u128,
rng: &SecureRandom,
fast_mode: bool,
) -> ([u8; HANDSHAKE_LEN], [u8; 32], u128, [u8; 32], u128) {
@@ -278,9 +276,11 @@ pub fn generate_tg_nonce(
nonce[DC_IDX_POS..DC_IDX_POS + 2].copy_from_slice(&dc_idx.to_le_bytes());
if fast_mode {
nonce[SKIP_LEN..SKIP_LEN + KEY_LEN].copy_from_slice(client_dec_key);
nonce[SKIP_LEN + KEY_LEN..SKIP_LEN + KEY_LEN + IV_LEN]
.copy_from_slice(&client_dec_iv.to_be_bytes());
let mut key_iv = Vec::with_capacity(KEY_LEN + IV_LEN);
key_iv.extend_from_slice(client_enc_key);
key_iv.extend_from_slice(&client_enc_iv.to_be_bytes());
key_iv.reverse(); // Python/C behavior: reversed enc_key+enc_iv in nonce
nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN].copy_from_slice(&key_iv);
}
let enc_key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
@@ -332,10 +332,21 @@ mod tests {
fn test_generate_tg_nonce() {
let client_dec_key = [0x42u8; 32];
let client_dec_iv = 12345u128;
let client_enc_key = [0x24u8; 32];
let client_enc_iv = 54321u128;
let rng = SecureRandom::new();
let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) =
generate_tg_nonce(ProtoTag::Secure, 2, &client_dec_key, client_dec_iv, &rng, false);
generate_tg_nonce(
ProtoTag::Secure,
2,
&client_dec_key,
client_dec_iv,
&client_enc_key,
client_enc_iv,
&rng,
false,
);
assert_eq!(nonce.len(), HANDSHAKE_LEN);
@@ -347,10 +358,21 @@ mod tests {
fn test_encrypt_tg_nonce() {
let client_dec_key = [0x42u8; 32];
let client_dec_iv = 12345u128;
let client_enc_key = [0x24u8; 32];
let client_enc_iv = 54321u128;
let rng = SecureRandom::new();
let (nonce, _, _, _, _) =
generate_tg_nonce(ProtoTag::Secure, 2, &client_dec_key, client_dec_iv, &rng, false);
generate_tg_nonce(
ProtoTag::Secure,
2,
&client_dec_key,
client_dec_iv,
&client_enc_key,
client_enc_iv,
&rng,
false,
);
let encrypted = encrypt_tg_nonce(&nonce);
@@ -379,4 +401,4 @@ mod tests {
drop(success);
// Drop impl zeroizes key material without panic
}
}
}

View File

@@ -5,6 +5,7 @@ use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tracing::{debug, info, trace};
use crate::config::ProxyConfig;
use crate::crypto::SecureRandom;
use crate::error::{ProxyError, Result};
use crate::protocol::constants::*;
use crate::proxy::handshake::HandshakeSuccess;
@@ -21,6 +22,7 @@ pub(crate) async fn handle_via_middle_proxy<R, W>(
_config: Arc<ProxyConfig>,
_buffer_pool: Arc<BufferPool>,
local_addr: SocketAddr,
rng: Arc<SecureRandom>,
) -> Result<()>
where
R: AsyncRead + Unpin + Send + 'static,
@@ -58,16 +60,23 @@ where
tokio::select! {
client_frame = read_client_payload(&mut crypto_reader, proto_tag) => {
match client_frame {
Ok(Some(payload)) => {
Ok(Some((payload, quickack))) => {
trace!(conn_id, bytes = payload.len(), "C->ME frame");
stats.add_user_octets_from(&user, payload.len() as u64);
let mut flags = proto_flags;
if quickack {
flags |= RPC_FLAG_QUICKACK;
}
if payload.len() >= 8 && payload[..8].iter().all(|b| *b == 0) {
flags |= RPC_FLAG_NOT_ENCRYPTED;
}
me_pool.send_proxy_req(
conn_id,
success.dc_idx,
peer,
translated_local_addr,
&payload,
proto_flags,
flags,
).await?;
}
Ok(None) => {
@@ -83,7 +92,7 @@ where
Some(MeResponse::Data { flags, data }) => {
trace!(conn_id, bytes = data.len(), flags, "ME->C data");
stats.add_user_octets_to(&user, data.len() as u64);
write_client_payload(&mut crypto_writer, proto_tag, flags, &data).await?;
write_client_payload(&mut crypto_writer, proto_tag, flags, &data, rng.as_ref()).await?;
}
Some(MeResponse::Ack(confirm)) => {
trace!(conn_id, confirm, "ME->C quickack");
@@ -111,11 +120,11 @@ where
async fn read_client_payload<R>(
client_reader: &mut CryptoReader<R>,
proto_tag: ProtoTag,
) -> Result<Option<Vec<u8>>>
) -> Result<Option<(Vec<u8>, bool)>>
where
R: AsyncRead + Unpin + Send + 'static,
{
let len = match proto_tag {
let (len, quickack) = match proto_tag {
ProtoTag::Abridged => {
let mut first = [0u8; 1];
match client_reader.read_exact(&mut first).await {
@@ -124,6 +133,7 @@ where
Err(e) => return Err(ProxyError::Io(e)),
}
let quickack = (first[0] & 0x80) != 0;
let len_words = if (first[0] & 0x7f) == 0x7f {
let mut ext = [0u8; 3];
client_reader
@@ -135,9 +145,10 @@ where
(first[0] & 0x7f) as usize
};
len_words
let len = len_words
.checked_mul(4)
.ok_or_else(|| ProxyError::Proxy("Abridged frame length overflow".into()))?
.ok_or_else(|| ProxyError::Proxy("Abridged frame length overflow".into()))?;
(len, quickack)
}
ProtoTag::Intermediate | ProtoTag::Secure => {
let mut len_buf = [0u8; 4];
@@ -146,7 +157,8 @@ where
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => return Ok(None),
Err(e) => return Err(ProxyError::Io(e)),
}
(u32::from_le_bytes(len_buf) & 0x7fff_ffff) as usize
let quickack = (len_buf[3] & 0x80) != 0;
((u32::from_le_bytes(len_buf) & 0x7fff_ffff) as usize, quickack)
}
};
@@ -159,7 +171,15 @@ where
.read_exact(&mut payload)
.await
.map_err(ProxyError::Io)?;
Ok(Some(payload))
// Secure Intermediate: remove random padding (last len%4 bytes)
if proto_tag == ProtoTag::Secure {
let rem = len % 4;
if rem != 0 && payload.len() >= rem {
payload.truncate(len - rem);
}
}
Ok(Some((payload, quickack)))
}
async fn write_client_payload<W>(
@@ -167,6 +187,7 @@ async fn write_client_payload<W>(
proto_tag: ProtoTag,
flags: u32,
data: &[u8],
rng: &SecureRandom,
) -> Result<()>
where
W: AsyncWrite + Unpin + Send + 'static,
@@ -215,7 +236,12 @@ where
.map_err(ProxyError::Io)?;
}
ProtoTag::Intermediate | ProtoTag::Secure => {
let mut len = data.len() as u32;
let padding_len = if proto_tag == ProtoTag::Secure {
(rng.bytes(1)[0] % 4) as usize
} else {
0
};
let mut len = (data.len() + padding_len) as u32;
if quickack {
len |= 0x8000_0000;
}
@@ -227,6 +253,13 @@ where
.write_all(data)
.await
.map_err(ProxyError::Io)?;
if padding_len > 0 {
let pad = rng.bytes(padding_len);
client_writer
.write_all(&pad)
.await
.map_err(ProxyError::Io)?;
}
}
}
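To make the padded-intermediate ("secure") framing above concrete, here is a hypothetical, self-contained sketch (not the proxy's actual API) of how the 4-byte length field is formed, mirroring the logic of `write_client_payload`:

```rust
/// Illustrative only: length field for one padded-intermediate frame,
/// as in `write_client_payload` above (little-endian on the wire).
fn frame_len_field(payload_len: usize, padding_len: usize, quickack: bool) -> u32 {
    // The length covers the payload plus the 0-3 random padding bytes.
    let mut len = (payload_len + padding_len) as u32;
    if quickack {
        // High bit requests a quick-ack, matching the `0x8000_0000` flag above.
        len |= 0x8000_0000;
    }
    len
}

fn main() {
    // 36-byte payload with 3 padding bytes and no quick-ack -> plain length 39.
    assert_eq!(frame_len_field(36, 3, false), 39);
    // Same frame with quick-ack requested -> high bit set on top of 0x27 (39).
    assert_eq!(frame_len_field(36, 3, true), 0x8000_0027);
    // The receiver masks the flag off and, for the secure tag, drops len % 4
    // trailing bytes: 39 % 4 == 3, recovering the original 36-byte payload.
    let len = 39u32 & 0x7fff_ffff;
    assert_eq!(len as usize - (len as usize % 4), 36);
}
```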

View File

@@ -212,28 +212,41 @@ impl ReplayChecker {
(hasher.finish() as usize) & self.shard_mask
}
fn check(&self, data: &[u8]) -> bool {
fn check_and_add_internal(&self, data: &[u8]) -> bool {
self.checks.fetch_add(1, Ordering::Relaxed);
let idx = self.get_shard_idx(data);
let mut shard = self.shards[idx].lock();
let found = shard.check(data, Instant::now(), self.window);
let now = Instant::now();
let found = shard.check(data, now, self.window);
if found {
self.hits.fetch_add(1, Ordering::Relaxed);
} else {
shard.add(data, now, self.window);
self.additions.fetch_add(1, Ordering::Relaxed);
}
found
}
fn add(&self, data: &[u8]) {
fn add_only(&self, data: &[u8]) {
self.additions.fetch_add(1, Ordering::Relaxed);
let idx = self.get_shard_idx(data);
let mut shard = self.shards[idx].lock();
shard.add(data, Instant::now(), self.window);
}
pub fn check_handshake(&self, data: &[u8]) -> bool { self.check(data) }
pub fn add_handshake(&self, data: &[u8]) { self.add(data) }
pub fn check_tls_digest(&self, data: &[u8]) -> bool { self.check(data) }
pub fn add_tls_digest(&self, data: &[u8]) { self.add(data) }
pub fn check_and_add_handshake(&self, data: &[u8]) -> bool {
self.check_and_add_internal(data)
}
pub fn check_and_add_tls_digest(&self, data: &[u8]) -> bool {
self.check_and_add_internal(data)
}
// Compatibility helpers (non-atomic split operations) — prefer check_and_add_*.
pub fn check_handshake(&self, data: &[u8]) -> bool { self.check_and_add_handshake(data) }
pub fn add_handshake(&self, data: &[u8]) { self.add_only(data) }
pub fn check_tls_digest(&self, data: &[u8]) -> bool { self.check_and_add_tls_digest(data) }
pub fn add_tls_digest(&self, data: &[u8]) { self.add_only(data) }
pub fn stats(&self) -> ReplayStats {
let mut total_entries = 0;
@@ -326,10 +339,9 @@ mod tests {
#[test]
fn test_replay_checker_basic() {
let checker = ReplayChecker::new(100, Duration::from_secs(60));
assert!(!checker.check_handshake(b"test1"));
checker.add_handshake(b"test1");
assert!(checker.check_handshake(b"test1"));
assert!(!checker.check_handshake(b"test2"));
assert!(!checker.check_handshake(b"test1")); // first time, inserts
assert!(checker.check_handshake(b"test1")); // duplicate
assert!(!checker.check_handshake(b"test2")); // new key inserts
}
#[test]
@@ -343,7 +355,7 @@ mod tests {
#[test]
fn test_replay_checker_expiration() {
let checker = ReplayChecker::new(100, Duration::from_millis(50));
checker.add_handshake(b"expire");
assert!(!checker.check_handshake(b"expire"));
assert!(checker.check_handshake(b"expire"));
std::thread::sleep(Duration::from_millis(100));
assert!(!checker.check_handshake(b"expire"));
@@ -352,25 +364,25 @@ mod tests {
#[test]
fn test_replay_checker_stats() {
let checker = ReplayChecker::new(100, Duration::from_secs(60));
checker.add_handshake(b"k1");
checker.add_handshake(b"k2");
checker.check_handshake(b"k1");
checker.check_handshake(b"k3");
assert!(!checker.check_handshake(b"k1"));
assert!(!checker.check_handshake(b"k2"));
assert!(checker.check_handshake(b"k1"));
assert!(!checker.check_handshake(b"k3"));
let stats = checker.stats();
assert_eq!(stats.total_additions, 2);
assert_eq!(stats.total_checks, 2);
assert_eq!(stats.total_additions, 3);
assert_eq!(stats.total_checks, 4);
assert_eq!(stats.total_hits, 1);
}
#[test]
fn test_replay_checker_many_keys() {
let checker = ReplayChecker::new(1000, Duration::from_secs(60));
let checker = ReplayChecker::new(10_000, Duration::from_secs(60));
for i in 0..500u32 {
checker.add(&i.to_le_bytes());
checker.add_only(&i.to_le_bytes());
}
for i in 0..500u32 {
assert!(checker.check(&i.to_le_bytes()));
assert!(checker.check_handshake(&i.to_le_bytes()));
}
assert_eq!(checker.stats().total_entries, 500);
}
}
}

View File

@@ -381,9 +381,14 @@ mod tests {
// Add a buffer to pool
pool.preallocate(1);
// Now try_get should succeed
assert!(pool.try_get().is_some());
// Now try_get should succeed once while the buffer is held
let buf = pool.try_get();
assert!(buf.is_some());
// While buffer is held, pool is empty
assert!(pool.try_get().is_none());
// Drop buffer -> returns to pool, should be obtainable again
drop(buf);
assert!(pool.try_get().is_some());
}
#[test]
@@ -448,4 +453,4 @@ mod tests {
// All buffers should be returned
assert!(stats.pooled > 0);
}
}
}

View File

@@ -32,7 +32,7 @@
//! and uploads from iOS will break (media/file sending), while small traffic
//! may still work.
use bytes::{Bytes, BytesMut, BufMut};
use bytes::{Bytes, BytesMut};
use std::io::{self, Error, ErrorKind, Result};
use std::pin::Pin;
use std::task::{Context, Poll};
@@ -51,9 +51,10 @@ use super::state::{StreamState, HeaderBuffer, YieldBuffer, WriteBuffer};
/// TLS record header size (type + version + length)
const TLS_HEADER_SIZE: usize = 5;
/// Maximum TLS fragment size per spec (plaintext fragment).
/// We use this for *outgoing* chunking, because we build plain ApplicationData records.
const MAX_TLS_PAYLOAD: usize = 16384;
/// Maximum TLS fragment size we emit for Application Data.
/// Real TLS 1.3 ciphertexts often add ~16-24 bytes AEAD overhead, so to mimic
/// on-the-wire record sizes we allow up to 16384 + 24 bytes of plaintext.
const MAX_TLS_PAYLOAD: usize = 16384 + 24;
/// Maximum pending write buffer for one record remainder.
/// Note: we never queue an unlimited amount of data here; the state holds at most one record.
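A quick sanity check of the numbers above (illustrative arithmetic only): the largest Application Data record this writer emits is the 5-byte header plus the padded payload.

// Sketch: maximum on-the-wire record size implied by the constants above.
const TLS_HEADER_SIZE: usize = 5;
const MAX_TLS_PAYLOAD: usize = 16384 + 24;
assert_eq!(TLS_HEADER_SIZE + MAX_TLS_PAYLOAD, 16_413);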
@@ -918,10 +919,8 @@ mod tests {
let reader = ChunkedReader::new(&record, 100);
let mut tls_reader = FakeTlsReader::new(reader);
let mut buf = vec![0u8; payload.len()];
tls_reader.read_exact(&mut buf).await.unwrap();
assert_eq!(&buf, payload);
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
assert_eq!(&buf[..], payload);
}
#[tokio::test]
@@ -935,13 +934,11 @@ mod tests {
let reader = ChunkedReader::new(&data, 100);
let mut tls_reader = FakeTlsReader::new(reader);
let mut buf1 = vec![0u8; payload1.len()];
tls_reader.read_exact(&mut buf1).await.unwrap();
assert_eq!(&buf1, payload1);
let buf1 = tls_reader.read_exact(payload1.len()).await.unwrap();
assert_eq!(&buf1[..], payload1);
let mut buf2 = vec![0u8; payload2.len()];
tls_reader.read_exact(&mut buf2).await.unwrap();
assert_eq!(&buf2, payload2);
let buf2 = tls_reader.read_exact(payload2.len()).await.unwrap();
assert_eq!(&buf2[..], payload2);
}
#[tokio::test]
@@ -953,10 +950,9 @@ mod tests {
let reader = ChunkedReader::new(&record, 1); // 1 byte at a time!
let mut tls_reader = FakeTlsReader::new(reader);
let mut buf = vec![0u8; payload.len()];
tls_reader.read_exact(&mut buf).await.unwrap();
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
assert_eq!(&buf, payload);
assert_eq!(&buf[..], payload);
}
#[tokio::test]
@@ -967,10 +963,9 @@ mod tests {
let reader = ChunkedReader::new(&record, 7); // Awkward chunk size
let mut tls_reader = FakeTlsReader::new(reader);
let mut buf = vec![0u8; payload.len()];
tls_reader.read_exact(&mut buf).await.unwrap();
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
assert_eq!(&buf, payload);
assert_eq!(&buf[..], payload);
}
#[tokio::test]
@@ -983,10 +978,9 @@ mod tests {
let reader = ChunkedReader::new(&data, 100);
let mut tls_reader = FakeTlsReader::new(reader);
let mut buf = vec![0u8; payload.len()];
tls_reader.read_exact(&mut buf).await.unwrap();
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
assert_eq!(&buf, payload);
assert_eq!(&buf[..], payload);
}
#[tokio::test]
@@ -1000,10 +994,9 @@ mod tests {
let reader = ChunkedReader::new(&data, 3); // Small chunks
let mut tls_reader = FakeTlsReader::new(reader);
let mut buf = vec![0u8; payload.len()];
tls_reader.read_exact(&mut buf).await.unwrap();
let buf = tls_reader.read_exact(payload.len()).await.unwrap();
assert_eq!(&buf, payload);
assert_eq!(&buf[..], payload);
}
#[tokio::test]
@@ -1244,4 +1237,4 @@ mod tests {
let bytes = header.to_bytes();
assert_eq!(bytes, [0x17, 0x03, 0x03, 0x12, 0x34]);
}
}
}

View File

@@ -1,925 +0,0 @@
//! Middle Proxy RPC Transport
//!
//! Implements Telegram Middle-End RPC protocol for routing to ALL DCs (including CDN).
//!
//! ## Phase 3 fixes:
//! - ROOT CAUSE: Use Telegram proxy-secret (binary file) not user secret
//! - Streaming handshake response (no fixed-size read deadlock)
//! - Health monitoring + reconnection
//! - Hex diagnostics for debugging
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;
use bytes::{Bytes, BytesMut};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::sync::{mpsc, Mutex, RwLock};
use tokio::time::{timeout, Instant};
use tracing::{debug, info, trace, warn, error};
use crate::crypto::{crc32, derive_middleproxy_keys, AesCbc, SecureRandom};
use crate::error::{ProxyError, Result};
use crate::protocol::constants::*;
// ========== Proxy Secret Fetching ==========
/// Fetch the Telegram proxy-secret binary file.
///
/// This is NOT the user secret (-S flag, 16 bytes hex for clients).
/// This is the infrastructure secret (--aes-pwd in C MTProxy),
/// a binary file of 32-512 bytes used for ME RPC key derivation.
///
/// Strategy: try local cache, then download from Telegram.
pub async fn fetch_proxy_secret(cache_path: Option<&str>) -> Result<Vec<u8>> {
let cache = cache_path.unwrap_or("proxy-secret");
// 1. Try local cache (< 24h old)
if let Ok(metadata) = tokio::fs::metadata(cache).await {
if let Ok(modified) = metadata.modified() {
let age = std::time::SystemTime::now()
.duration_since(modified)
.unwrap_or(Duration::from_secs(u64::MAX));
if age < Duration::from_secs(86400) {
if let Ok(data) = tokio::fs::read(cache).await {
if data.len() >= 32 {
info!(
path = cache,
len = data.len(),
age_hours = age.as_secs() / 3600,
"Loaded proxy-secret from cache"
);
return Ok(data);
}
warn!(path = cache, len = data.len(), "Cached proxy-secret too short");
}
}
}
}
// 2. Download from Telegram
info!("Downloading proxy-secret from core.telegram.org...");
let data = download_proxy_secret().await?;
// 3. Cache locally (best-effort)
if let Err(e) = tokio::fs::write(cache, &data).await {
warn!(error = %e, "Failed to cache proxy-secret (non-fatal)");
} else {
debug!(path = cache, len = data.len(), "Cached proxy-secret");
}
Ok(data)
}
async fn download_proxy_secret() -> Result<Vec<u8>> {
let url = "https://core.telegram.org/getProxySecret";
let resp = reqwest::get(url)
.await
.map_err(|e| ProxyError::Proxy(format!("Failed to download proxy-secret: {}", e)))?;
if !resp.status().is_success() {
return Err(ProxyError::Proxy(format!(
"proxy-secret download HTTP {}", resp.status()
)));
}
let data = resp.bytes().await
.map_err(|e| ProxyError::Proxy(format!("Read proxy-secret body: {}", e)))?
.to_vec();
if data.len() < 32 {
return Err(ProxyError::Proxy(format!(
"proxy-secret too short: {} bytes (need >= 32)", data.len()
)));
}
info!(len = data.len(), "Downloaded proxy-secret OK");
Ok(data)
}
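To make the distinction above concrete, a small sketch; the hex value is a made-up placeholder, not a real secret, and fetch_proxy_secret is the function defined in this file:

// Hypothetical per-client user secret (-S flag): exactly 16 bytes, hex for clients.
let user_secret = hex::decode("00112233445566778899aabbccddeeff").unwrap();
assert_eq!(user_secret.len(), 16);
// Infrastructure proxy-secret: opaque binary blob of 32-512 bytes, cached on disk.
let proxy_secret = fetch_proxy_secret(None).await?;
assert!(proxy_secret.len() >= 32);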
// ========== RPC Frame helpers ==========
/// Build an RPC frame: [len(4) | seq_no(4) | payload | crc32(4)]
fn build_rpc_frame(seq_no: i32, payload: &[u8]) -> Vec<u8> {
let total_len = (4 + 4 + payload.len() + 4) as u32;
let mut f = Vec::with_capacity(total_len as usize);
f.extend_from_slice(&total_len.to_le_bytes());
f.extend_from_slice(&seq_no.to_le_bytes());
f.extend_from_slice(payload);
let c = crc32(&f);
f.extend_from_slice(&c.to_le_bytes());
f
}
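A worked example of that layout, written as a test sketch (payload bytes are arbitrary; the trailing CRC is whatever crc32 produces for the first 12 bytes):

#[test]
fn rpc_frame_layout_example() {
    // 4-byte payload => total_len = 4 (len) + 4 (seq_no) + 4 (payload) + 4 (crc) = 16.
    let frame = build_rpc_frame(-2, &[0xAA, 0xBB, 0xCC, 0xDD]);
    assert_eq!(frame.len(), 16);
    assert_eq!(&frame[0..4], &16u32.to_le_bytes()[..]);                 // len
    assert_eq!(&frame[4..8], &(-2i32).to_le_bytes()[..]);               // seq_no
    assert_eq!(&frame[8..12], &[0xAA, 0xBB, 0xCC, 0xDD][..]);           // payload
    assert_eq!(&frame[12..16], &crc32(&frame[..12]).to_le_bytes()[..]); // trailing CRC32
}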
/// Read one plaintext RPC frame. Returns (seq_no, payload).
async fn read_rpc_frame_plaintext(
rd: &mut (impl AsyncReadExt + Unpin),
) -> Result<(i32, Vec<u8>)> {
let mut len_buf = [0u8; 4];
rd.read_exact(&mut len_buf).await.map_err(ProxyError::Io)?;
let total_len = u32::from_le_bytes(len_buf) as usize;
if total_len < 12 || total_len > (1 << 24) {
return Err(ProxyError::InvalidHandshake(
format!("Bad RPC frame length: {}", total_len),
));
}
let mut rest = vec![0u8; total_len - 4];
rd.read_exact(&mut rest).await.map_err(ProxyError::Io)?;
let mut full = Vec::with_capacity(total_len);
full.extend_from_slice(&len_buf);
full.extend_from_slice(&rest);
let crc_offset = total_len - 4;
let expected_crc = u32::from_le_bytes([
full[crc_offset], full[crc_offset + 1],
full[crc_offset + 2], full[crc_offset + 3],
]);
let actual_crc = crc32(&full[..crc_offset]);
if expected_crc != actual_crc {
return Err(ProxyError::InvalidHandshake(
format!("CRC mismatch: 0x{:08x} vs 0x{:08x}", expected_crc, actual_crc),
));
}
let seq_no = i32::from_le_bytes([full[4], full[5], full[6], full[7]]);
let payload = full[8..crc_offset].to_vec();
Ok((seq_no, payload))
}
// ========== RPC Nonce (32 bytes payload) ==========
fn build_nonce_payload(key_selector: u32, crypto_ts: u32, nonce: &[u8; 16]) -> [u8; 32] {
let mut p = [0u8; 32];
p[0..4].copy_from_slice(&RPC_NONCE_U32.to_le_bytes());
p[4..8].copy_from_slice(&key_selector.to_le_bytes());
p[8..12].copy_from_slice(&RPC_CRYPTO_AES_U32.to_le_bytes());
p[12..16].copy_from_slice(&crypto_ts.to_le_bytes());
p[16..32].copy_from_slice(nonce);
p
}
fn parse_nonce_payload(d: &[u8]) -> Result<(u32, u32, [u8; 16])> {
if d.len() < 32 {
return Err(ProxyError::InvalidHandshake(
format!("Nonce payload too short: {} bytes", d.len()),
));
}
let t = u32::from_le_bytes([d[0], d[1], d[2], d[3]]);
if t != RPC_NONCE_U32 {
return Err(ProxyError::InvalidHandshake(
format!("Expected RPC_NONCE 0x{:08x}, got 0x{:08x}", RPC_NONCE_U32, t),
));
}
let schema = u32::from_le_bytes([d[8], d[9], d[10], d[11]]);
let ts = u32::from_le_bytes([d[12], d[13], d[14], d[15]]);
let mut nonce = [0u8; 16];
nonce.copy_from_slice(&d[16..32]);
Ok((schema, ts, nonce))
}
// ========== RPC Handshake (32 bytes payload) ==========
fn build_handshake_payload(our_ip: u32, our_port: u16, peer_ip: u32, peer_port: u16) -> [u8; 32] {
let mut p = [0u8; 32];
p[0..4].copy_from_slice(&RPC_HANDSHAKE_U32.to_le_bytes());
// flags = 0 at offset 4..8
// sender_pid: {ip(4), port(2), pid(2), utime(4)} at offset 8..20
p[8..12].copy_from_slice(&our_ip.to_le_bytes());
p[12..14].copy_from_slice(&our_port.to_le_bytes());
let pid = (std::process::id() & 0xFFFF) as u16;
p[14..16].copy_from_slice(&pid.to_le_bytes());
let utime = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as u32;
p[16..20].copy_from_slice(&utime.to_le_bytes());
// peer_pid: {ip(4), port(2), pid(2), utime(4)} at offset 20..32
p[20..24].copy_from_slice(&peer_ip.to_le_bytes());
p[24..26].copy_from_slice(&peer_port.to_le_bytes());
p
}
// ========== CBC helpers ==========
fn cbc_encrypt_padded(key: &[u8; 32], iv: &[u8; 16], plaintext: &[u8]) -> Result<(Vec<u8>, [u8; 16])> {
let pad = (16 - (plaintext.len() % 16)) % 16;
let mut buf = plaintext.to_vec();
let pad_pattern: [u8; 4] = [0x04, 0x00, 0x00, 0x00];
for i in 0..pad {
buf.push(pad_pattern[i % 4]);
}
let cipher = AesCbc::new(*key, *iv);
cipher.encrypt_in_place(&mut buf)
.map_err(|e| ProxyError::Crypto(format!("CBC encrypt: {}", e)))?;
let mut new_iv = [0u8; 16];
if buf.len() >= 16 {
new_iv.copy_from_slice(&buf[buf.len() - 16..]);
}
Ok((buf, new_iv))
}
fn cbc_decrypt_inplace(key: &[u8; 32], iv: &[u8; 16], data: &mut [u8]) -> Result<[u8; 16]> {
let mut new_iv = [0u8; 16];
if data.len() >= 16 {
new_iv.copy_from_slice(&data[data.len() - 16..]);
}
AesCbc::new(*key, *iv)
.decrypt_in_place(data)
.map_err(|e| ProxyError::Crypto(format!("CBC decrypt: {}", e)))?;
Ok(new_iv)
}
// ========== IPv4 helpers ==========
fn ipv4_to_mapped_v6(ip: Ipv4Addr) -> [u8; 16] {
let mut buf = [0u8; 16];
buf[10] = 0xFF;
buf[11] = 0xFF;
let o = ip.octets();
buf[12] = o[0]; buf[13] = o[1]; buf[14] = o[2]; buf[15] = o[3];
buf
}
fn addr_to_ip_u32(addr: &SocketAddr) -> u32 {
match addr.ip() {
IpAddr::V4(v4) => u32::from_be_bytes(v4.octets()),
IpAddr::V6(v6) => {
if let Some(v4) = v6.to_ipv4_mapped() {
u32::from_be_bytes(v4.octets())
} else { 0 }
}
}
}
// ========== ME Response ==========
#[derive(Debug)]
pub enum MeResponse {
Data(Bytes),
Ack(u32),
Close,
}
// ========== Connection Registry ==========
pub struct ConnRegistry {
map: RwLock<HashMap<u64, mpsc::Sender<MeResponse>>>,
next_id: AtomicU64,
}
impl ConnRegistry {
pub fn new() -> Self {
Self {
map: RwLock::new(HashMap::new()),
next_id: AtomicU64::new(1),
}
}
pub async fn register(&self) -> (u64, mpsc::Receiver<MeResponse>) {
let id = self.next_id.fetch_add(1, Ordering::Relaxed);
let (tx, rx) = mpsc::channel(256);
self.map.write().await.insert(id, tx);
(id, rx)
}
pub async fn unregister(&self, id: u64) {
self.map.write().await.remove(&id);
}
pub async fn route(&self, id: u64, resp: MeResponse) -> bool {
let m = self.map.read().await;
if let Some(tx) = m.get(&id) {
tx.send(resp).await.is_ok()
} else { false }
}
}
// ========== RPC Writer (streaming CBC) ==========
struct RpcWriter {
writer: tokio::io::WriteHalf<TcpStream>,
key: [u8; 32],
iv: [u8; 16],
seq_no: i32,
}
impl RpcWriter {
async fn send(&mut self, payload: &[u8]) -> Result<()> {
let frame = build_rpc_frame(self.seq_no, payload);
self.seq_no += 1;
let pad = (16 - (frame.len() % 16)) % 16;
let mut buf = frame;
let pad_pattern: [u8; 4] = [0x04, 0x00, 0x00, 0x00];
for i in 0..pad {
buf.push(pad_pattern[i % 4]);
}
let cipher = AesCbc::new(self.key, self.iv);
cipher.encrypt_in_place(&mut buf)
.map_err(|e| ProxyError::Crypto(format!("{}", e)))?;
if buf.len() >= 16 {
self.iv.copy_from_slice(&buf[buf.len() - 16..]);
}
self.writer.write_all(&buf).await.map_err(ProxyError::Io)
}
}
// ========== RPC_PROXY_REQ ==========
fn build_proxy_req_payload(
conn_id: u64,
client_addr: SocketAddr,
our_addr: SocketAddr,
data: &[u8],
proxy_tag: Option<&[u8]>,
proto_flags: u32,
) -> Vec<u8> {
// flags are pre-computed by proto_flags_for_tag, which already sets
// FLAG_HAS_AD_TAG when a proxy tag is present.
let flags = proto_flags;
// C reference logic: flags = transport_flags | 0x1000 | 0x20000 | 0x8 (if tag).
// proto_flags_for_tag returns the same combination, so nothing extra is needed here.
let b_cap = 128 + data.len();
let mut b = Vec::with_capacity(b_cap);
b.extend_from_slice(&RPC_PROXY_REQ_U32.to_le_bytes());
b.extend_from_slice(&flags.to_le_bytes());
b.extend_from_slice(&conn_id.to_le_bytes());
// Client IP (16 bytes IPv4-mapped-v6) + port (4 bytes)
match client_addr.ip() {
IpAddr::V4(v4) => b.extend_from_slice(&ipv4_to_mapped_v6(v4)),
IpAddr::V6(v6) => b.extend_from_slice(&v6.octets()),
}
b.extend_from_slice(&(client_addr.port() as u32).to_le_bytes());
// Our IP (16 bytes) + port (4 bytes)
match our_addr.ip() {
IpAddr::V4(v4) => b.extend_from_slice(&ipv4_to_mapped_v6(v4)),
IpAddr::V6(v6) => b.extend_from_slice(&v6.octets()),
}
b.extend_from_slice(&(our_addr.port() as u32).to_le_bytes());
// Extra section (proxy_tag)
if flags & 12 != 0 {
let extra_start = b.len();
b.extend_from_slice(&0u32.to_le_bytes()); // placeholder
if let Some(tag) = proxy_tag {
b.extend_from_slice(&TL_PROXY_TAG_U32.to_le_bytes());
// TL string encoding
if tag.len() < 254 {
b.push(tag.len() as u8);
b.extend_from_slice(tag);
let pad = (4 - ((1 + tag.len()) % 4)) % 4;
b.extend(std::iter::repeat(0u8).take(pad));
} else {
b.push(0xfe);
let len_bytes = (tag.len() as u32).to_le_bytes();
b.extend_from_slice(&len_bytes[..3]);
b.extend_from_slice(tag);
let pad = (4 - (tag.len() % 4)) % 4;
b.extend(std::iter::repeat(0u8).take(pad));
}
}
let extra_bytes = (b.len() - extra_start - 4) as u32;
let eb = extra_bytes.to_le_bytes();
b[extra_start..extra_start + 4].copy_from_slice(&eb);
}
b.extend_from_slice(data);
b
}
// ========== ME Pool ==========
pub struct MePool {
registry: Arc<ConnRegistry>,
writers: Arc<RwLock<Vec<Arc<Mutex<RpcWriter>>>>>,
rr: AtomicU64,
proxy_tag: Option<Vec<u8>>,
/// Telegram proxy-secret (binary, 32-512 bytes)
proxy_secret: Vec<u8>,
pool_size: usize,
}
impl MePool {
pub fn new(proxy_tag: Option<Vec<u8>>, proxy_secret: Vec<u8>) -> Arc<Self> {
Arc::new(Self {
registry: Arc::new(ConnRegistry::new()),
writers: Arc::new(RwLock::new(Vec::new())),
rr: AtomicU64::new(0),
proxy_tag,
proxy_secret,
pool_size: 2,
})
}
pub fn registry(&self) -> &Arc<ConnRegistry> {
&self.registry
}
fn writers_arc(&self) -> Arc<RwLock<Vec<Arc<Mutex<RpcWriter>>>>> {
self.writers.clone()
}
/// key_selector = first 4 bytes of proxy-secret as LE u32
/// C: main_secret.key_signature via union { char secret[]; int key_signature; }
fn key_selector(&self) -> u32 {
if self.proxy_secret.len() >= 4 {
u32::from_le_bytes([
self.proxy_secret[0], self.proxy_secret[1],
self.proxy_secret[2], self.proxy_secret[3],
])
} else { 0 }
}
pub async fn init(
self: &Arc<Self>,
pool_size: usize,
rng: &SecureRandom,
) -> Result<()> {
let addrs = &*TG_MIDDLE_PROXIES_FLAT_V4;
let ks = self.key_selector();
info!(
me_servers = addrs.len(),
pool_size,
key_selector = format_args!("0x{:08x}", ks),
secret_len = self.proxy_secret.len(),
"Initializing ME pool"
);
for &(ip, port) in addrs.iter() {
for i in 0..pool_size {
let addr = SocketAddr::new(ip, port);
match self.connect_one(addr, rng).await {
Ok(()) => info!(%addr, idx = i, "ME connected"),
Err(e) => warn!(%addr, idx = i, error = %e, "ME connect failed"),
}
}
if self.writers.read().await.len() >= pool_size {
break;
}
}
if self.writers.read().await.is_empty() {
return Err(ProxyError::Proxy("No ME connections".into()));
}
Ok(())
}
async fn connect_one(
self: &Arc<Self>,
addr: SocketAddr,
rng: &SecureRandom,
) -> Result<()> {
let secret = &self.proxy_secret;
if secret.len() < 32 {
return Err(ProxyError::Proxy("proxy-secret too short for ME auth".into()));
}
// ===== TCP connect =====
let stream = timeout(
Duration::from_secs(ME_CONNECT_TIMEOUT_SECS),
TcpStream::connect(addr),
)
.await
.map_err(|_| ProxyError::ConnectionTimeout { addr: addr.to_string() })?
.map_err(ProxyError::Io)?;
stream.set_nodelay(true).ok();
let local_addr = stream.local_addr().map_err(ProxyError::Io)?;
let peer_addr = stream.peer_addr().map_err(ProxyError::Io)?;
let (mut rd, mut wr) = tokio::io::split(stream);
// ===== 1. Send RPC nonce (plaintext, seq=-2) =====
let my_nonce: [u8; 16] = rng.bytes(16).try_into().unwrap();
let crypto_ts = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as u32;
let ks = self.key_selector();
let nonce_payload = build_nonce_payload(ks, crypto_ts, &my_nonce);
let nonce_frame = build_rpc_frame(-2, &nonce_payload);
debug!(
%addr,
frame_len = nonce_frame.len(),
key_sel = format_args!("0x{:08x}", ks),
crypto_ts,
"Sending nonce"
);
wr.write_all(&nonce_frame).await.map_err(ProxyError::Io)?;
wr.flush().await.map_err(ProxyError::Io)?;
// ===== 2. Read server nonce (plaintext, seq=-2) =====
let (srv_seq, srv_nonce_payload) = timeout(
Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS),
read_rpc_frame_plaintext(&mut rd),
)
.await
.map_err(|_| ProxyError::TgHandshakeTimeout)??;
if srv_seq != -2 {
return Err(ProxyError::InvalidHandshake(
format!("Expected seq=-2, got {}", srv_seq),
));
}
let (schema, _srv_ts, srv_nonce) = parse_nonce_payload(&srv_nonce_payload)?;
if schema != RPC_CRYPTO_AES_U32 {
return Err(ProxyError::InvalidHandshake(
format!("Unsupported crypto schema: 0x{:x}", schema),
));
}
debug!(%addr, "Nonce exchange OK, deriving keys");
// ===== 3. Derive AES-256-CBC keys =====
// C buffer layout:
// [0..16] nonce_server (srv_nonce)
// [16..32] nonce_client (my_nonce)
// [32..36] client_timestamp
// [36..40] server_ip
// [40..42] client_port
// [42..48] "CLIENT" or "SERVER"
// [48..52] client_ip
// [52..54] server_port
// [54..54+N] secret (proxy-secret binary)
// [54+N..70+N] nonce_server
// nonce_client(16)
let ts_bytes = crypto_ts.to_le_bytes();
let server_ip = addr_to_ip_u32(&peer_addr);
let client_ip = addr_to_ip_u32(&local_addr);
let server_ip_bytes = server_ip.to_le_bytes();
let client_ip_bytes = client_ip.to_le_bytes();
let server_port_bytes = peer_addr.port().to_le_bytes();
let client_port_bytes = local_addr.port().to_le_bytes();
let (wk, wi) = derive_middleproxy_keys(
&srv_nonce, &my_nonce, &ts_bytes,
Some(&server_ip_bytes), &client_port_bytes,
b"CLIENT",
Some(&client_ip_bytes), &server_port_bytes,
secret, None, None,
);
let (rk, ri) = derive_middleproxy_keys(
&srv_nonce, &my_nonce, &ts_bytes,
Some(&server_ip_bytes), &client_port_bytes,
b"SERVER",
Some(&client_ip_bytes), &server_port_bytes,
secret, None, None,
);
debug!(
%addr,
write_key = %hex::encode(&wk[..8]),
read_key = %hex::encode(&rk[..8]),
"Keys derived"
);
// ===== 4. Send encrypted handshake (seq=-1) =====
let hs_payload = build_handshake_payload(
client_ip, local_addr.port(),
server_ip, peer_addr.port(),
);
let hs_frame = build_rpc_frame(-1, &hs_payload);
let (encrypted_hs, write_iv) = cbc_encrypt_padded(&wk, &wi, &hs_frame)?;
wr.write_all(&encrypted_hs).await.map_err(ProxyError::Io)?;
wr.flush().await.map_err(ProxyError::Io)?;
debug!(%addr, enc_len = encrypted_hs.len(), "Sent encrypted handshake");
// ===== 5. Read encrypted handshake response (STREAMING) =====
// Server sends encrypted handshake. C crypto layer may send partial
// blocks (only complete 16-byte blocks get encrypted at a time).
// We read incrementally and decrypt block-by-block.
let deadline = Instant::now() + Duration::from_secs(ME_HANDSHAKE_TIMEOUT_SECS);
let mut enc_buf = BytesMut::with_capacity(256);
let mut dec_buf = BytesMut::with_capacity(256);
let mut read_iv = ri;
let mut handshake_ok = false;
while Instant::now() < deadline && !handshake_ok {
let remaining = deadline - Instant::now();
let mut tmp = [0u8; 256];
let n = match timeout(remaining, rd.read(&mut tmp)).await {
Ok(Ok(0)) => return Err(ProxyError::Io(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof, "ME closed during handshake",
))),
Ok(Ok(n)) => n,
Ok(Err(e)) => return Err(ProxyError::Io(e)),
Err(_) => return Err(ProxyError::TgHandshakeTimeout),
};
enc_buf.extend_from_slice(&tmp[..n]);
// Decrypt complete 16-byte blocks
let blocks = enc_buf.len() / 16 * 16;
if blocks > 0 {
let mut chunk = vec![0u8; blocks];
chunk.copy_from_slice(&enc_buf[..blocks]);
let new_iv = cbc_decrypt_inplace(&rk, &read_iv, &mut chunk)?;
read_iv = new_iv;
dec_buf.extend_from_slice(&chunk);
let _ = enc_buf.split_to(blocks);
}
// Try to parse RPC frame from decrypted data
while dec_buf.len() >= 4 {
let fl = u32::from_le_bytes([
dec_buf[0], dec_buf[1], dec_buf[2], dec_buf[3],
]) as usize;
// Skip noop padding
if fl == 4 {
let _ = dec_buf.split_to(4);
continue;
}
if fl < 12 || fl > (1 << 24) {
return Err(ProxyError::InvalidHandshake(
format!("Bad HS response frame len: {}", fl),
));
}
if dec_buf.len() < fl {
break; // need more data
}
let frame = dec_buf.split_to(fl);
// CRC32 check
let pe = fl - 4;
let ec = u32::from_le_bytes([
frame[pe], frame[pe + 1], frame[pe + 2], frame[pe + 3],
]);
let ac = crc32(&frame[..pe]);
if ec != ac {
return Err(ProxyError::InvalidHandshake(
format!("HS CRC mismatch: 0x{:08x} vs 0x{:08x}", ec, ac),
));
}
// Check type
let hs_type = u32::from_le_bytes([
frame[8], frame[9], frame[10], frame[11],
]);
if hs_type == RPC_HANDSHAKE_ERROR_U32 {
let err_code = if frame.len() >= 16 {
i32::from_le_bytes([frame[12], frame[13], frame[14], frame[15]])
} else { -1 };
return Err(ProxyError::InvalidHandshake(
format!("ME rejected handshake (error={})", err_code),
));
}
if hs_type != RPC_HANDSHAKE_U32 {
return Err(ProxyError::InvalidHandshake(
format!("Expected HANDSHAKE 0x{:08x}, got 0x{:08x}", RPC_HANDSHAKE_U32, hs_type),
));
}
handshake_ok = true;
break;
}
}
if !handshake_ok {
return Err(ProxyError::TgHandshakeTimeout);
}
info!(%addr, "RPC handshake OK");
// ===== 6. Setup writer + reader =====
let rpc_w = Arc::new(Mutex::new(RpcWriter {
writer: wr,
key: wk,
iv: write_iv,
seq_no: 0,
}));
self.writers.write().await.push(rpc_w.clone());
let reg = self.registry.clone();
let w_pong = rpc_w.clone();
let w_pool = self.writers_arc();
tokio::spawn(async move {
if let Err(e) = reader_loop(rd, rk, read_iv, reg, enc_buf, dec_buf, w_pong.clone()).await {
warn!(error = %e, "ME reader ended");
}
// Remove dead writer from pool
let mut ws = w_pool.write().await;
ws.retain(|w| !Arc::ptr_eq(w, &w_pong));
info!(remaining = ws.len(), "Dead ME writer removed from pool");
});
Ok(())
}
pub async fn send_proxy_req(
&self,
conn_id: u64,
client_addr: SocketAddr,
our_addr: SocketAddr,
data: &[u8],
proto_flags: u32,
) -> Result<()> {
let payload = build_proxy_req_payload(
conn_id, client_addr, our_addr, data,
self.proxy_tag.as_deref(), proto_flags,
);
loop {
let ws = self.writers.read().await;
if ws.is_empty() {
return Err(ProxyError::Proxy("All ME connections dead".into()));
}
let idx = self.rr.fetch_add(1, Ordering::Relaxed) as usize % ws.len();
let w = ws[idx].clone();
drop(ws);
match w.lock().await.send(&payload).await {
Ok(()) => return Ok(()),
Err(e) => {
warn!(error = %e, "ME write failed, removing dead conn");
let mut ws = self.writers.write().await;
ws.retain(|o| !Arc::ptr_eq(o, &w));
if ws.is_empty() {
return Err(ProxyError::Proxy("All ME connections dead".into()));
}
}
}
}
}
pub async fn send_close(&self, conn_id: u64) -> Result<()> {
let ws = self.writers.read().await;
if !ws.is_empty() {
let w = ws[0].clone();
drop(ws);
let mut p = Vec::with_capacity(12);
p.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
p.extend_from_slice(&conn_id.to_le_bytes());
if let Err(e) = w.lock().await.send(&p).await {
debug!(error = %e, "ME close write failed");
let mut ws = self.writers.write().await;
ws.retain(|o| !Arc::ptr_eq(o, &w));
}
}
self.registry.unregister(conn_id).await;
Ok(())
}
pub fn connection_count(&self) -> usize {
self.writers.try_read().map(|w| w.len()).unwrap_or(0)
}
}
// ========== Reader Loop ==========
async fn reader_loop(
mut rd: tokio::io::ReadHalf<TcpStream>,
dk: [u8; 32],
mut div: [u8; 16],
reg: Arc<ConnRegistry>,
mut enc_leftover: BytesMut,
mut dec: BytesMut,
writer: Arc<Mutex<RpcWriter>>,
) -> Result<()> {
let mut raw = enc_leftover;
loop {
let mut tmp = [0u8; 16384];
let n = rd.read(&mut tmp).await.map_err(ProxyError::Io)?;
if n == 0 { return Ok(()); }
raw.extend_from_slice(&tmp[..n]);
// Decrypt complete 16-byte blocks
let blocks = raw.len() / 16 * 16;
if blocks > 0 {
let mut new_iv = [0u8; 16];
new_iv.copy_from_slice(&raw[blocks - 16..blocks]);
let mut chunk = vec![0u8; blocks];
chunk.copy_from_slice(&raw[..blocks]);
AesCbc::new(dk, div)
.decrypt_in_place(&mut chunk)
.map_err(|e| ProxyError::Crypto(format!("{}", e)))?;
div = new_iv;
dec.extend_from_slice(&chunk);
let _ = raw.split_to(blocks);
}
// Parse RPC frames
while dec.len() >= 12 {
let fl = u32::from_le_bytes([dec[0], dec[1], dec[2], dec[3]]) as usize;
if fl == 4 { let _ = dec.split_to(4); continue; }
if fl < 12 || fl > (1 << 24) {
warn!(frame_len = fl, "Invalid RPC frame len");
dec.clear();
break;
}
if dec.len() < fl { break; }
let frame = dec.split_to(fl);
let pe = fl - 4;
let ec = u32::from_le_bytes([frame[pe], frame[pe+1], frame[pe+2], frame[pe+3]]);
if crc32(&frame[..pe]) != ec {
warn!("CRC mismatch in data frame");
continue;
}
let payload = &frame[8..pe];
if payload.len() < 4 { continue; }
let pt = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
let body = &payload[4..];
if pt == RPC_PROXY_ANS_U32 && body.len() >= 12 {
let flags = u32::from_le_bytes(body[0..4].try_into().unwrap());
let cid = u64::from_le_bytes(body[4..12].try_into().unwrap());
let data = Bytes::copy_from_slice(&body[12..]);
trace!(cid, len = data.len(), flags, "ANS");
reg.route(cid, MeResponse::Data(data)).await;
} else if pt == RPC_SIMPLE_ACK_U32 && body.len() >= 12 {
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
let cfm = u32::from_le_bytes(body[8..12].try_into().unwrap());
trace!(cid, cfm, "ACK");
reg.route(cid, MeResponse::Ack(cfm)).await;
} else if pt == RPC_CLOSE_EXT_U32 && body.len() >= 8 {
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
debug!(cid, "CLOSE_EXT from ME");
reg.route(cid, MeResponse::Close).await;
reg.unregister(cid).await;
} else if pt == RPC_CLOSE_CONN_U32 && body.len() >= 8 {
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
debug!(cid, "CLOSE_CONN from ME");
reg.route(cid, MeResponse::Close).await;
reg.unregister(cid).await;
} else if pt == RPC_PING_U32 && body.len() >= 8 {
let ping_id = i64::from_le_bytes(body[0..8].try_into().unwrap());
trace!(ping_id, "RPC_PING -> PONG");
let mut pong = Vec::with_capacity(12);
pong.extend_from_slice(&RPC_PONG_U32.to_le_bytes());
pong.extend_from_slice(&ping_id.to_le_bytes());
if let Err(e) = writer.lock().await.send(&pong).await {
warn!(error = %e, "PONG send failed");
break;
}
} else {
debug!(rpc_type = format_args!("0x{:08x}", pt), len = body.len(), "Unknown RPC");
}
}
}
}
// ========== Proto flags ==========
/// Map ProtoTag to C-compatible RPC_PROXY_REQ transport flags.
/// C: RPC_F_COMPACT(0x40000000)=abridged, RPC_F_MEDIUM(0x20000000)=intermediate/secure
/// The 0x1000(magic) and 0x8(proxy_tag) are added inside build_proxy_req_payload.
pub fn proto_flags_for_tag(tag: crate::protocol::constants::ProtoTag) -> u32 {
use crate::protocol::constants::*;
let mut flags = RPC_FLAG_HAS_AD_TAG | RPC_FLAG_MAGIC | RPC_FLAG_EXTMODE2;
match tag {
ProtoTag::Abridged => flags | RPC_FLAG_ABRIDGED,
ProtoTag::Intermediate => flags | RPC_FLAG_INTERMEDIATE,
ProtoTag::Secure => flags | RPC_FLAG_PAD | RPC_FLAG_INTERMEDIATE,
}
}
// ========== Health Monitor (Phase 4) ==========
pub async fn me_health_monitor(
pool: Arc<MePool>,
rng: Arc<SecureRandom>,
min_connections: usize,
) {
loop {
tokio::time::sleep(Duration::from_secs(30)).await;
let current = pool.writers.read().await.len();
if current < min_connections {
warn!(current, min = min_connections, "ME pool below minimum, reconnecting...");
let addrs = TG_MIDDLE_PROXIES_FLAT_V4.clone();
for &(ip, port) in addrs.iter() {
let needed = min_connections.saturating_sub(pool.writers.read().await.len());
if needed == 0 { break; }
for _ in 0..needed {
let addr = SocketAddr::new(ip, port);
match pool.connect_one(addr, &rng).await {
Ok(()) => info!(%addr, "ME reconnected"),
Err(e) => debug!(%addr, error = %e, "ME reconnect failed"),
}
}
}
}
}
}

View File

@@ -0,0 +1,91 @@
use std::collections::HashMap;
use std::net::IpAddr;
use std::sync::Arc;
use std::time::Duration;
use regex::Regex;
use tracing::{debug, info, warn};
use crate::error::Result;
use super::MePool;
use super::secret::download_proxy_secret;
use crate::crypto::SecureRandom;
#[derive(Debug, Clone, Default)]
pub struct ProxyConfigData {
pub map: HashMap<i32, Vec<(IpAddr, u16)>>,
pub default_dc: Option<i32>,
}
pub async fn fetch_proxy_config(url: &str) -> Result<ProxyConfigData> {
let text = reqwest::get(url)
.await
.map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config GET failed: {e}")))?
.text()
.await
.map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config read failed: {e}")))?;
let re_proxy = Regex::new(r"proxy_for\s+(-?\d+)\s+([^\s:]+):(\d+)\s*;").unwrap();
let re_default = Regex::new(r"default\s+(-?\d+)\s*;").unwrap();
let mut map: HashMap<i32, Vec<(IpAddr, u16)>> = HashMap::new();
for cap in re_proxy.captures_iter(&text) {
if let (Some(dc), Some(host), Some(port)) = (cap.get(1), cap.get(2), cap.get(3)) {
if let Ok(dc_idx) = dc.as_str().parse::<i32>() {
if let Ok(ip) = host.as_str().parse::<IpAddr>() {
if let Ok(port_num) = port.as_str().parse::<u16>() {
map.entry(dc_idx).or_default().push((ip, port_num));
}
}
}
}
}
let default_dc = re_default
.captures(&text)
.and_then(|c| c.get(1))
.and_then(|m| m.as_str().parse::<i32>().ok());
Ok(ProxyConfigData { map, default_dc })
}
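For context, the two regexes above target the getProxyConfig text format; a sketch of what a parsed body looks like (sample lines and addresses are placeholders, not the live Telegram config):

// Sketch input as served by getProxyConfig (placeholder addresses):
//     default 2;
//     proxy_for 1 203.0.113.10:8888;
//     proxy_for -1 203.0.113.11:8888;
//     proxy_for 2 203.0.113.20:80;
// Resulting ProxyConfigData:
//     map        = { 1: [(203.0.113.10, 8888)], -1: [(203.0.113.11, 8888)], 2: [(203.0.113.20, 80)] }
//     default_dc = Some(2)
// Negative keys are looked up by the send path via the key/abs/-abs fallback in send.rs.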
pub async fn me_config_updater(pool: Arc<MePool>, rng: Arc<SecureRandom>, interval: Duration) {
let mut tick = tokio::time::interval(interval);
// skip immediate tick to avoid double-fetch right after startup
tick.tick().await;
loop {
tick.tick().await;
// Update proxy config v4
if let Ok(cfg) = fetch_proxy_config("https://core.telegram.org/getProxyConfig").await {
let changed = pool.update_proxy_maps(cfg.map.clone(), None).await;
if let Some(dc) = cfg.default_dc {
pool.default_dc.store(dc, std::sync::atomic::Ordering::Relaxed);
}
if changed {
info!("ME config updated (v4), reconciling connections");
pool.reconcile_connections(&rng).await;
} else {
debug!("ME config v4 unchanged");
}
} else {
warn!("getProxyConfig update failed");
}
// Update proxy config v6 (optional)
if let Ok(cfg_v6) = fetch_proxy_config("https://core.telegram.org/getProxyConfigV6").await {
let _ = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await;
}
// Update proxy-secret
match download_proxy_secret().await {
Ok(secret) => {
if pool.update_secret(secret).await {
info!("proxy-secret updated and pool reconnect scheduled");
}
}
Err(e) => warn!(error = %e, "proxy-secret update failed"),
}
}
}
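A sketch of how this updater is intended to be started; the one-hour interval and the surrounding pool/rng variables are assumptions for illustration:

// Illustrative background task: refresh proxy config and proxy-secret periodically.
let pool_bg = pool.clone();
let rng_bg = rng.clone();
tokio::spawn(me_config_updater(pool_bg, rng_bg, Duration::from_secs(3600)));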

View File

@@ -3,33 +3,42 @@ use std::sync::Arc;
use std::time::Duration;
use tracing::{debug, info, warn};
use rand::seq::SliceRandom;
use crate::crypto::SecureRandom;
use crate::protocol::constants::TG_MIDDLE_PROXIES_FLAT_V4;
use super::MePool;
pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, min_connections: usize) {
pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_connections: usize) {
loop {
tokio::time::sleep(Duration::from_secs(30)).await;
let current = pool.connection_count();
if current < min_connections {
warn!(
current,
min = min_connections,
"ME pool below minimum, reconnecting..."
);
let addrs = TG_MIDDLE_PROXIES_FLAT_V4.clone();
for &(ip, port) in addrs.iter() {
let needed = min_connections.saturating_sub(pool.connection_count());
if needed == 0 {
break;
}
for _ in 0..needed {
let addr = SocketAddr::new(ip, port);
// Per-DC coverage check
let map = pool.proxy_map_v4.read().await.clone();
let writer_addrs: std::collections::HashSet<SocketAddr> = pool
.writers
.read()
.await
.iter()
.map(|(a, _)| *a)
.collect();
for (dc, addrs) in map.iter() {
let dc_addrs: Vec<SocketAddr> = addrs
.iter()
.map(|(ip, port)| SocketAddr::new(*ip, *port))
.collect();
let has_coverage = dc_addrs.iter().any(|a| writer_addrs.contains(a));
if !has_coverage {
warn!(dc = %dc, "DC has no ME coverage, reconnecting...");
let mut shuffled = dc_addrs.clone();
shuffled.shuffle(&mut rand::rng());
for addr in shuffled {
match pool.connect_one(addr, &rng).await {
Ok(()) => info!(%addr, "ME reconnected"),
Err(e) => debug!(%addr, error = %e, "ME reconnect failed"),
Ok(()) => {
info!(%addr, dc = %dc, "ME reconnected for DC coverage");
break;
}
Err(e) => debug!(%addr, dc = %dc, error = %e, "ME reconnect failed"),
}
}
}
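Call sites do not change with the per-DC rewrite; a sketch (the trailing argument is kept for signature compatibility but, as the underscore shows, no longer used):

// Illustrative: every 30s, reconnect any DC whose ME writers have all died.
tokio::spawn(me_health_monitor(pool.clone(), rng.clone(), 2));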

View File

@@ -8,6 +8,7 @@ mod reader;
mod registry;
mod send;
mod secret;
mod config_updater;
mod wire;
use bytes::Bytes;
@@ -16,6 +17,7 @@ pub use health::me_health_monitor;
pub use pool::MePool;
pub use registry::ConnRegistry;
pub use secret::fetch_proxy_secret;
pub use config_updater::{fetch_proxy_config, me_config_updater};
pub use wire::proto_flags_for_tag;
#[derive(Debug)]

View File

@@ -1,11 +1,12 @@
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::sync::OnceLock;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::{AtomicI32, AtomicU64};
use std::time::Duration;
use bytes::BytesMut;
use rand::Rng;
use rand::seq::SliceRandom;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::sync::{Mutex, RwLock};
@@ -32,11 +33,14 @@ pub struct MePool {
pub(super) writers: Arc<RwLock<Vec<(SocketAddr, Arc<Mutex<RpcWriter>>)>>>,
pub(super) rr: AtomicU64,
pub(super) proxy_tag: Option<Vec<u8>>,
proxy_secret: Vec<u8>,
pub(super) proxy_secret: Arc<RwLock<Vec<u8>>>,
pub(super) nat_ip_cfg: Option<IpAddr>,
pub(super) nat_ip_detected: OnceLock<IpAddr>,
pub(super) nat_ip_detected: Arc<RwLock<Option<IpAddr>>>,
pub(super) nat_probe: bool,
pub(super) nat_stun: Option<String>,
pub(super) proxy_map_v4: Arc<RwLock<HashMap<i32, Vec<(IpAddr, u16)>>>>,
pub(super) proxy_map_v6: Arc<RwLock<HashMap<i32, Vec<(IpAddr, u16)>>>>,
pub(super) default_dc: AtomicI32,
pool_size: usize,
}
@@ -47,18 +51,24 @@ impl MePool {
nat_ip: Option<IpAddr>,
nat_probe: bool,
nat_stun: Option<String>,
proxy_map_v4: HashMap<i32, Vec<(IpAddr, u16)>>,
proxy_map_v6: HashMap<i32, Vec<(IpAddr, u16)>>,
default_dc: Option<i32>,
) -> Arc<Self> {
Arc::new(Self {
registry: Arc::new(ConnRegistry::new()),
writers: Arc::new(RwLock::new(Vec::new())),
rr: AtomicU64::new(0),
proxy_tag,
proxy_secret,
proxy_secret: Arc::new(RwLock::new(proxy_secret)),
nat_ip_cfg: nat_ip,
nat_ip_detected: OnceLock::new(),
nat_ip_detected: Arc::new(RwLock::new(None)),
nat_probe,
nat_stun,
pool_size: 2,
proxy_map_v4: Arc::new(RwLock::new(proxy_map_v4)),
proxy_map_v6: Arc::new(RwLock::new(proxy_map_v6)),
default_dc: AtomicI32::new(default_dc.unwrap_or(0)),
})
}
@@ -80,39 +90,129 @@ impl MePool {
self.writers.clone()
}
fn key_selector(&self) -> u32 {
if self.proxy_secret.len() >= 4 {
u32::from_le_bytes([
self.proxy_secret[0],
self.proxy_secret[1],
self.proxy_secret[2],
self.proxy_secret[3],
])
pub async fn reconcile_connections(&self, rng: &SecureRandom) {
use std::collections::HashSet;
let map = self.proxy_map_v4.read().await.clone();
let writers = self.writers.read().await;
let current: HashSet<SocketAddr> = writers.iter().map(|(a, _)| *a).collect();
drop(writers);
for (_dc, addrs) in map.iter() {
let dc_addrs: Vec<SocketAddr> = addrs
.iter()
.map(|(ip, port)| SocketAddr::new(*ip, *port))
.collect();
if !dc_addrs.iter().any(|a| current.contains(a)) {
let mut shuffled = dc_addrs.clone();
shuffled.shuffle(&mut rand::rng());
for addr in shuffled {
if self.connect_one(addr, rng).await.is_ok() {
break;
}
}
}
}
}
pub async fn update_proxy_maps(
&self,
new_v4: HashMap<i32, Vec<(IpAddr, u16)>>,
new_v6: Option<HashMap<i32, Vec<(IpAddr, u16)>>>,
) -> bool {
let mut changed = false;
{
let mut guard = self.proxy_map_v4.write().await;
if !new_v4.is_empty() && *guard != new_v4 {
*guard = new_v4;
changed = true;
}
}
if let Some(v6) = new_v6 {
let mut guard = self.proxy_map_v6.write().await;
if !v6.is_empty() && *guard != v6 {
*guard = v6;
}
}
changed
}
pub async fn update_secret(&self, new_secret: Vec<u8>) -> bool {
if new_secret.len() < 32 {
warn!(len = new_secret.len(), "proxy-secret update ignored (too short)");
return false;
}
let mut guard = self.proxy_secret.write().await;
if *guard != new_secret {
*guard = new_secret;
drop(guard);
self.reconnect_all().await;
return true;
}
false
}
pub async fn reconnect_all(&self) {
// Graceful: do not drop all at once. New connections will use updated secret.
// Existing writers remain until health monitor replaces them.
// No-op here to avoid total outage.
}
async fn key_selector(&self) -> u32 {
let secret = self.proxy_secret.read().await;
if secret.len() >= 4 {
u32::from_le_bytes([secret[0], secret[1], secret[2], secret[3]])
} else {
0
}
}
pub async fn init(self: &Arc<Self>, pool_size: usize, rng: &SecureRandom) -> Result<()> {
let addrs = &*TG_MIDDLE_PROXIES_FLAT_V4;
let ks = self.key_selector();
let map = self.proxy_map_v4.read().await;
let ks = self.key_selector().await;
info!(
me_servers = addrs.len(),
me_servers = map.len(),
pool_size,
key_selector = format_args!("0x{ks:08x}"),
secret_len = self.proxy_secret.len(),
secret_len = self.proxy_secret.read().await.len(),
"Initializing ME pool"
);
for &(ip, port) in addrs.iter() {
for i in 0..pool_size {
// Ensure at least one connection per DC with failover over all addresses
for (dc, addrs) in map.iter() {
if addrs.is_empty() {
continue;
}
let mut connected = false;
let mut shuffled = addrs.clone();
shuffled.shuffle(&mut rand::rng());
for (ip, port) in shuffled {
let addr = SocketAddr::new(ip, port);
match self.connect_one(addr, rng).await {
Ok(()) => info!(%addr, idx = i, "ME connected"),
Err(e) => warn!(%addr, idx = i, error = %e, "ME connect failed"),
Ok(()) => {
info!(%addr, dc = %dc, "ME connected");
connected = true;
break;
}
Err(e) => warn!(%addr, dc = %dc, error = %e, "ME connect failed, trying next"),
}
}
if self.writers.read().await.len() >= pool_size {
if !connected {
warn!(dc = %dc, "All ME servers for DC failed at init");
}
}
// Additional connections up to pool_size total (round-robin across DCs)
for (dc, addrs) in map.iter() {
for (ip, port) in addrs {
if self.connection_count() >= pool_size {
break;
}
let addr = SocketAddr::new(*ip, *port);
if let Err(e) = self.connect_one(addr, rng).await {
debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed");
}
}
if self.connection_count() >= pool_size {
break;
}
}
@@ -124,11 +224,12 @@ impl MePool {
}
pub(crate) async fn connect_one(
self: &Arc<Self>,
&self,
addr: SocketAddr,
rng: &SecureRandom,
) -> Result<()> {
let secret = &self.proxy_secret;
let secret_guard = self.proxy_secret.read().await;
let secret: Vec<u8> = secret_guard.clone();
if secret.len() < 32 {
return Err(ProxyError::Proxy(
"proxy-secret too short for ME auth".into(),
@@ -165,7 +266,7 @@ impl MePool {
.unwrap_or_default()
.as_secs() as u32;
let ks = self.key_selector();
let ks = self.key_selector().await;
let nonce_payload = build_nonce_payload(ks, crypto_ts, &my_nonce);
let nonce_frame = build_rpc_frame(-2, &nonce_payload);
let dump = hex_dump(&nonce_frame[..nonce_frame.len().min(44)]);
@@ -234,7 +335,10 @@ impl MePool {
let (srv_ip_opt, clt_ip_opt, clt_v6_opt, srv_v6_opt, hs_our_ip, hs_peer_ip) =
match (server_ip, client_ip) {
(IpMaterial::V4(srv), IpMaterial::V4(clt)) => {
// IPv4: reverse byte order for KDF (Python/C reference behavior)
(IpMaterial::V4(mut srv), IpMaterial::V4(mut clt)) => {
srv.reverse();
clt.reverse();
(Some(srv), Some(clt), None, None, clt, srv)
}
(IpMaterial::V6(srv), IpMaterial::V6(clt)) => {
@@ -263,7 +367,7 @@ impl MePool {
b"CLIENT",
clt_ip_opt.as_ref().map(|x| &x[..]),
&server_port_bytes,
secret,
&secret,
clt_v6_opt.as_ref(),
srv_v6_opt.as_ref(),
);
@@ -276,7 +380,7 @@ impl MePool {
b"SERVER",
clt_ip_opt.as_ref().map(|x| &x[..]),
&server_port_bytes,
secret,
&secret,
clt_v6_opt.as_ref(),
srv_v6_opt.as_ref(),
);
@@ -290,7 +394,7 @@ impl MePool {
b"CLIENT",
clt_ip_opt.as_ref().map(|x| &x[..]),
&server_port_bytes,
secret,
&secret,
clt_v6_opt.as_ref(),
srv_v6_opt.as_ref(),
);
@@ -303,7 +407,7 @@ impl MePool {
b"SERVER",
clt_ip_opt.as_ref().map(|x| &x[..]),
&server_port_bytes,
secret,
&secret,
clt_v6_opt.as_ref(),
srv_v6_opt.as_ref(),
);
@@ -327,7 +431,7 @@ impl MePool {
prekey_sha256_client = %hex_dump(&sha256(&prekey_client)),
prekey_sha256_server = %hex_dump(&sha256(&prekey_server)),
hs_plain = %hex_dump(&hs_frame),
proxy_secret_sha256 = %hex_dump(&sha256(secret)),
proxy_secret_sha256 = %hex_dump(&sha256(&secret)),
"ME diag: derived keys and handshake plaintext"
);
}
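Pulling the new constructor parameters together, a minimal startup sketch; the leading proxy_tag/proxy_secret arguments and the call sites are assumptions inferred from the fields above, not copied from this diff:

// Illustrative wiring of the reworked MePool (sketch, not the project's actual main).
use std::collections::HashMap;

let rng = Arc::new(SecureRandom::new());
let secret = fetch_proxy_secret(None).await?;
let cfg = fetch_proxy_config("https://core.telegram.org/getProxyConfig").await?;
let pool = MePool::new(
    proxy_tag,        // Option<Vec<u8>>: advertising tag, if any (assumed leading param)
    secret,           // proxy-secret blob (>= 32 bytes)
    None,             // nat_ip: no statically configured NAT address
    true,             // nat_probe: auto-detect the public IP
    None,             // nat_stun: no STUN override
    cfg.map,          // proxy_map_v4 from getProxyConfig
    HashMap::new(),   // proxy_map_v6: filled in later by me_config_updater
    cfg.default_dc,   // default DC used when a target DC has no entry
);
pool.init(2, &rng).await?;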

View File

@@ -10,7 +10,7 @@ impl MePool {
pub(super) fn translate_ip_for_nat(&self, ip: IpAddr) -> IpAddr {
let nat_ip = self
.nat_ip_cfg
.or_else(|| self.nat_ip_detected.get().copied());
.or_else(|| self.nat_ip_detected.try_read().ok().and_then(|g| (*g).clone()));
let Some(nat_ip) = nat_ip else {
return ip;
@@ -60,13 +60,16 @@ impl MePool {
return None;
}
if let Some(ip) = self.nat_ip_detected.get().copied() {
if let Some(ip) = self.nat_ip_detected.read().await.clone() {
return Some(ip);
}
match fetch_public_ipv4().await {
match fetch_public_ipv4_with_retry().await {
Ok(Some(ip)) => {
let _ = self.nat_ip_detected.set(IpAddr::V4(ip));
{
let mut guard = self.nat_ip_detected.write().await;
*guard = Some(IpAddr::V4(ip));
}
info!(public_ip = %ip, "Auto-detected public IP for NAT translation");
Some(IpAddr::V4(ip))
}
@@ -98,8 +101,22 @@ impl MePool {
}
}
async fn fetch_public_ipv4() -> Result<Option<Ipv4Addr>> {
let res = reqwest::get("https://checkip.amazonaws.com").await.map_err(|e| {
async fn fetch_public_ipv4_with_retry() -> Result<Option<Ipv4Addr>> {
let providers = [
"https://checkip.amazonaws.com",
"http://v4.ident.me",
"http://ipv4.icanhazip.com",
];
for url in providers {
if let Ok(Some(ip)) = fetch_public_ipv4_once(url).await {
return Ok(Some(ip));
}
}
Ok(None)
}
async fn fetch_public_ipv4_once(url: &str) -> Result<Option<Ipv4Addr>> {
let res = reqwest::get(url).await.map_err(|e| {
ProxyError::Proxy(format!("public IP detection request failed: {e}"))
})?;
@@ -128,7 +145,7 @@ async fn fetch_stun_binding(stun_addr: &str) -> Result<Option<std::net::SocketAd
req[0..2].copy_from_slice(&0x0001u16.to_be_bytes()); // Binding Request
req[2..4].copy_from_slice(&0u16.to_be_bytes()); // length
req[4..8].copy_from_slice(&0x2112A442u32.to_be_bytes()); // magic cookie
rand::thread_rng().fill_bytes(&mut req[8..20]);
rand::rng().fill_bytes(&mut req[8..20]);
socket
.send(&req)
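// Sketch (illustrative, not part of this file): the change above only adds
// provider failover to the existing auto-detection. The first provider that
// returns a parseable IPv4 wins; if all three fail, the function yields Ok(None),
// nothing is cached, and translate_ip_for_nat uses the configured nat_ip if
// present or leaves the address untranslated.
// let detected: Option<Ipv4Addr> = fetch_public_ipv4_with_retry().await?;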

View File

@@ -4,9 +4,13 @@ use std::sync::atomic::{AtomicU64, Ordering};
use tokio::sync::{RwLock, mpsc};
use super::MeResponse;
use super::codec::RpcWriter;
use std::sync::Arc;
use tokio::sync::Mutex;
pub struct ConnRegistry {
map: RwLock<HashMap<u64, mpsc::Sender<MeResponse>>>,
writers: RwLock<HashMap<u64, Arc<Mutex<RpcWriter>>>>,
next_id: AtomicU64,
}
@@ -16,6 +20,7 @@ impl ConnRegistry {
let start = rand::random::<u64>() | 1;
Self {
map: RwLock::new(HashMap::new()),
writers: RwLock::new(HashMap::new()),
next_id: AtomicU64::new(start),
}
}
@@ -29,6 +34,7 @@ impl ConnRegistry {
pub async fn unregister(&self, id: u64) {
self.map.write().await.remove(&id);
self.writers.write().await.remove(&id);
}
pub async fn route(&self, id: u64, resp: MeResponse) -> bool {
@@ -39,4 +45,14 @@ impl ConnRegistry {
false
}
}
pub async fn set_writer(&self, id: u64, w: Arc<Mutex<RpcWriter>>) {
let mut guard = self.writers.write().await;
guard.entry(id).or_insert_with(|| w);
}
pub async fn get_writer(&self, id: u64) -> Option<Arc<Mutex<RpcWriter>>> {
let guard = self.writers.read().await;
guard.get(&id).cloned()
}
}
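The new writers map pins each proxied connection to the ME writer that first carried it, so the close frame in send_close goes out on the same upstream link. A usage sketch with caller-side names that are illustrative only:

// Sketch: after choosing an ME writer `w` for a fresh client connection.
let (conn_id, rx) = registry.register().await;
registry.set_writer(conn_id, w.clone()).await;
// ... forward traffic, consume MeResponse values from `rx` ...
// On teardown, send_close() retrieves the same writer:
if let Some(w) = registry.get_writer(conn_id).await {
    // build RPC_CLOSE_EXT for conn_id and send it on `w`
}
registry.unregister(conn_id).await; // drops both the response channel and the writer entry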

View File

@@ -51,7 +51,7 @@ pub async fn fetch_proxy_secret(cache_path: Option<&str>) -> Result<Vec<u8>> {
}
}
async fn download_proxy_secret() -> Result<Vec<u8>> {
pub async fn download_proxy_secret() -> Result<Vec<u8>> {
let resp = reqwest::get("https://core.telegram.org/getProxySecret")
.await
.map_err(|e| ProxyError::Proxy(format!("Failed to download proxy-secret: {e}")))?;

View File

@@ -6,11 +6,13 @@ use tokio::sync::Mutex;
use tracing::{debug, warn};
use crate::error::{ProxyError, Result};
use crate::protocol::constants::{RPC_CLOSE_EXT_U32, TG_MIDDLE_PROXIES_V4};
use crate::protocol::constants::RPC_CLOSE_EXT_U32;
use super::MePool;
use super::codec::RpcWriter;
use super::wire::build_proxy_req_payload;
use crate::crypto::SecureRandom;
use rand::seq::SliceRandom;
impl MePool {
pub async fn send_proxy_req(
@@ -39,9 +41,28 @@ impl MePool {
let writers: Vec<(SocketAddr, Arc<Mutex<RpcWriter>>)> = ws.iter().cloned().collect();
drop(ws);
let candidate_indices = candidate_indices_for_dc(&writers, target_dc);
let mut candidate_indices = self.candidate_indices_for_dc(&writers, target_dc).await;
if candidate_indices.is_empty() {
return Err(ProxyError::Proxy("No ME writers available for target DC".into()));
// Emergency: try to connect to target DC addresses on the fly, then recompute writers
let map = self.proxy_map_v4.read().await;
if let Some(addrs) = map.get(&(target_dc as i32)) {
let mut shuffled = addrs.clone();
shuffled.shuffle(&mut rand::rng());
drop(map);
for (ip, port) in shuffled {
let addr = SocketAddr::new(ip, port);
if self.connect_one(addr, &SecureRandom::new()).await.is_ok() {
break;
}
}
let ws2 = self.writers.read().await;
let writers: Vec<(SocketAddr, Arc<Mutex<RpcWriter>>)> = ws2.iter().cloned().collect();
drop(ws2);
candidate_indices = self.candidate_indices_for_dc(&writers, target_dc).await;
}
if candidate_indices.is_empty() {
return Err(ProxyError::Proxy("No ME writers available for target DC".into()));
}
}
let start = self.rr.fetch_add(1, Ordering::Relaxed) as usize % candidate_indices.len();
@@ -85,10 +106,7 @@ impl MePool {
}
pub async fn send_close(&self, conn_id: u64) -> Result<()> {
let ws = self.writers.read().await;
if !ws.is_empty() {
let w = ws[0].1.clone();
drop(ws);
if let Some(w) = self.registry.get_writer(conn_id).await {
let mut p = Vec::with_capacity(12);
p.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
p.extend_from_slice(&conn_id.to_le_bytes());
@@ -97,6 +115,8 @@ impl MePool {
let mut ws = self.writers.write().await;
ws.retain(|(_, o)| !Arc::ptr_eq(o, &w));
}
} else {
debug!(conn_id, "ME close skipped (writer missing)");
}
self.registry.unregister(conn_id).await;
@@ -106,41 +126,54 @@ impl MePool {
pub fn connection_count(&self) -> usize {
self.writers.try_read().map(|w| w.len()).unwrap_or(0)
}
}
pub(super) async fn candidate_indices_for_dc(
&self,
writers: &[(SocketAddr, Arc<Mutex<RpcWriter>>)],
target_dc: i16,
) -> Vec<usize> {
let mut preferred = Vec::<SocketAddr>::new();
let key = target_dc as i32;
let map = self.proxy_map_v4.read().await;
fn candidate_indices_for_dc(
writers: &[(SocketAddr, Arc<Mutex<RpcWriter>>)],
target_dc: i16,
) -> Vec<usize> {
let mut preferred = Vec::<SocketAddr>::new();
let key = target_dc as i32;
if let Some(v) = TG_MIDDLE_PROXIES_V4.get(&key) {
preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
}
if preferred.is_empty() {
let abs = key.abs();
if let Some(v) = TG_MIDDLE_PROXIES_V4.get(&abs) {
if let Some(v) = map.get(&key) {
preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
}
}
if preferred.is_empty() {
let abs = key.abs();
if let Some(v) = TG_MIDDLE_PROXIES_V4.get(&-abs) {
preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
if preferred.is_empty() {
let abs = key.abs();
if let Some(v) = map.get(&abs) {
preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
}
}
}
if preferred.is_empty() {
return (0..writers.len()).collect();
if preferred.is_empty() {
let abs = key.abs();
if let Some(v) = map.get(&-abs) {
preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
}
}
if preferred.is_empty() {
let def = self.default_dc.load(Ordering::Relaxed);
if def != 0 {
if let Some(v) = map.get(&def) {
preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port)));
}
}
}
if preferred.is_empty() {
return (0..writers.len()).collect();
}
let mut out = Vec::new();
for (idx, (addr, _)) in writers.iter().enumerate() {
if preferred.iter().any(|p| p == addr) {
out.push(idx);
}
}
if out.is_empty() {
return (0..writers.len()).collect();
}
out
}
let mut out = Vec::new();
for (idx, (addr, _)) in writers.iter().enumerate() {
if preferred.iter().any(|p| p == addr) {
out.push(idx);
}
}
if out.is_empty() {
return (0..writers.len()).collect();
}
out
}
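The lookup order implemented above, spelled out on a small example (DC numbers are illustrative):

// Illustrative: candidate_indices_for_dc(writers, target_dc = 2) tries, in order:
//   1. proxy_map_v4[ 2]          exact key
//   2. proxy_map_v4[ 2]          abs(key) (redundant when the key is already positive)
//   3. proxy_map_v4[-2]          -abs(key), the negative counterpart of the DC
//   4. proxy_map_v4[default_dc]  if default_dc != 0
//   5. every current writer      last resort, so the request is still routed
// Writers whose remote address is in the preferred set are returned; if none
// match, the full index range is returned again as a final fallback.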

View File

@@ -28,9 +28,7 @@ fn ipv4_to_mapped_v6_c_compat(ip: Ipv4Addr) -> [u8; 16] {
buf[8..12].copy_from_slice(&(-0x10000i32).to_le_bytes());
// Matches tl_store_int(htonl(remote_ip_host_order)).
let host_order = u32::from_ne_bytes(ip.octets());
let network_order = host_order.to_be();
buf[12..16].copy_from_slice(&network_order.to_le_bytes());
buf[12..16].copy_from_slice(&ip.octets());
buf
}
@@ -60,7 +58,7 @@ pub(crate) fn build_proxy_req_payload(
append_mapped_addr_and_port(&mut b, client_addr);
append_mapped_addr_and_port(&mut b, our_addr);
if proto_flags & 12 != 0 {
if proto_flags & RPC_FLAG_HAS_AD_TAG != 0 {
let extra_start = b.len();
b.extend_from_slice(&0u32.to_le_bytes());
@@ -104,3 +102,17 @@ pub fn proto_flags_for_tag(tag: crate::protocol::constants::ProtoTag, has_proxy_
ProtoTag::Secure => flags | RPC_FLAG_PAD | RPC_FLAG_INTERMEDIATE,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_ipv4_mapped_encoding() {
let ip = Ipv4Addr::new(149, 154, 175, 50);
let buf = ipv4_to_mapped_v6_c_compat(ip);
assert_eq!(&buf[0..10], &[0u8; 10]);
assert_eq!(&buf[10..12], &[0xff, 0xff]);
assert_eq!(&buf[12..16], &[149, 154, 175, 50]);
}
}

BIN
telemt Normal file

Binary file not shown.

121
tools/dc.py Normal file
View File

@@ -0,0 +1,121 @@
from telethon import TelegramClient
from telethon.tl.functions.help import GetConfigRequest
import asyncio
api_id = ''
api_hash = ''
async def get_all_servers():
print("🔄 Подключаемся к Telegram...")
client = TelegramClient('session', api_id, api_hash)
await client.start()
print("✅ Подключение установлено!\n")
print("📡 Запрашиваем конфигурацию серверов...")
config = await client(GetConfigRequest())
print(f"📊 Получено серверов: {len(config.dc_options)}\n")
print("="*80)
# Group servers by DC ID
dc_groups = {}
for dc in config.dc_options:
if dc.id not in dc_groups:
dc_groups[dc.id] = []
dc_groups[dc.id].append(dc)
# Print all servers, grouped by DC
for dc_id in sorted(dc_groups.keys()):
servers = dc_groups[dc_id]
print(f"\n🌐 DATACENTER {dc_id} ({len(servers)} серверов)")
print("-" * 80)
for dc in servers:
# Collect flags
flags = []
if dc.ipv6:
flags.append("IPv6")
if dc.media_only:
flags.append("🎬 MEDIA-ONLY")
if dc.cdn:
flags.append("📦 CDN")
if dc.tcpo_only:
flags.append("🔒 TCPO")
if dc.static:
flags.append("📌 STATIC")
flags_str = f" [{', '.join(flags)}]" if flags else " [STANDARD]"
# Format the IP (padded for readability)
ip_display = f"{dc.ip_address:45}"
print(f" {ip_display}:{dc.port:5}{flags_str}")
# Statistics
print("\n" + "="*80)
print("📈 STATISTICS:")
print("="*80)
total = len(config.dc_options)
ipv4_count = sum(1 for dc in config.dc_options if not dc.ipv6)
ipv6_count = sum(1 for dc in config.dc_options if dc.ipv6)
media_count = sum(1 for dc in config.dc_options if dc.media_only)
cdn_count = sum(1 for dc in config.dc_options if dc.cdn)
tcpo_count = sum(1 for dc in config.dc_options if dc.tcpo_only)
static_count = sum(1 for dc in config.dc_options if dc.static)
print(f" Всего серверов: {total}")
print(f" IPv4 серверы: {ipv4_count}")
print(f" IPv6 серверы: {ipv6_count}")
print(f" Media-only: {media_count}")
print(f" CDN серверы: {cdn_count}")
print(f" TCPO-only: {tcpo_count}")
print(f" Static: {static_count}")
# Дополнительная информация из config
print("\n" + "="*80)
print(" ДОПОЛНИТЕЛЬНАЯ ИНФОРМАЦИЯ:")
print("="*80)
print(f" Дата конфигурации: {config.date}")
print(f" Expires: {config.expires}")
print(f" Test mode: {config.test_mode}")
print(f" This DC: {config.this_dc}")
# Save to file
print("\n💾 Saving results to telegram_servers.txt...")
with open('telegram_servers.txt', 'w', encoding='utf-8') as f:
f.write("TELEGRAM DATACENTER SERVERS\n")
f.write("="*80 + "\n\n")
for dc_id in sorted(dc_groups.keys()):
servers = dc_groups[dc_id]
f.write(f"\nDATACENTER {dc_id} ({len(servers)} servers)\n")
f.write("-" * 80 + "\n")
for dc in servers:
flags = []
if dc.ipv6:
flags.append("IPv6")
if dc.media_only:
flags.append("MEDIA-ONLY")
if dc.cdn:
flags.append("CDN")
if dc.tcpo_only:
flags.append("TCPO")
if dc.static:
flags.append("STATIC")
flags_str = f" [{', '.join(flags)}]" if flags else " [STANDARD]"
f.write(f" {dc.ip_address}:{dc.port}{flags_str}\n")
f.write(f"\n\nTotal servers: {total}\n")
f.write(f"Generated: {config.date}\n")
print("✅ Результаты сохранены в telegram_servers.txt")
await client.disconnect()
print("\n👋 Отключились от Telegram")
if __name__ == '__main__':
asyncio.run(get_all_servers())