Key derivation + me_health_monitor + QuickACK

Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
Author: Alexey
Date: 2026-02-13 12:51:49 +03:00
parent a494dfa9eb
commit f1c1f42de8
4 changed files with 1771 additions and 1018 deletions


@@ -116,6 +116,11 @@ pub struct GeneralConfig {
#[serde(default)]
pub ad_tag: Option<String>,
/// Path to proxy-secret binary file (auto-downloaded if absent).
/// Infrastructure secret from https://core.telegram.org/getProxySecret
#[serde(default)]
pub proxy_secret_path: Option<String>,
#[serde(default)]
pub log_level: LogLevel,
@@ -129,6 +134,7 @@ impl Default for GeneralConfig {
fast_mode: true,
use_middle_proxy: false,
ad_tag: None,
proxy_secret_path: None,
log_level: LogLevel::Normal,
}
}
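The new field only points at where the binary proxy-secret is cached; the actual download and caching happen in `transport::middle_proxy::fetch_proxy_secret` (part of the suppressed diff below). A rough sketch of what such a loader can look like, assuming a `reqwest`-style HTTP client and a plain boxed error type, neither of which is confirmed by this diff:

use std::path::Path;

/// Hypothetical sketch only; the real loader is `transport::middle_proxy::fetch_proxy_secret`.
/// Reads the cached proxy-secret if `path` exists, otherwise downloads it from
/// https://core.telegram.org/getProxySecret and caches it at `path`.
async fn load_proxy_secret_sketch(path: Option<&str>) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    let path = path.unwrap_or("proxy-secret");
    if Path::new(path).exists() {
        // Cached copy: the file is raw binary (not hex), typically a few hundred bytes.
        return Ok(std::fs::read(path)?);
    }
    // No cache yet: fetch the infrastructure secret from Telegram and store it.
    let bytes = reqwest::get("https://core.telegram.org/getProxySecret")
        .await?
        .bytes()
        .await?
        .to_vec();
    std::fs::write(path, &bytes)?;
    Ok(bytes)
}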


@@ -1,313 +1,391 @@
//! Telemt - MTProxy on Rust
//! telemt — Telegram MTProto Proxy
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::TcpListener;
use tokio::signal;
use tokio::sync::Semaphore;
use tracing::{info, error, warn, debug};
use tracing_subscriber::{fmt, EnvFilter, reload, prelude::*};
mod cli;
mod config;
mod crypto;
mod error;
mod protocol;
mod proxy;
mod stats;
mod stream;
mod transport;
mod util;
use crate::config::{ProxyConfig, LogLevel};
use crate::proxy::ClientHandler;
use crate::stats::{Stats, ReplayChecker};
use crate::crypto::SecureRandom;
use crate::transport::{create_listener, ListenOptions, UpstreamManager};
use crate::transport::middle_proxy::MePool;
use crate::util::ip::detect_ip;
use crate::stream::BufferPool;
fn parse_cli() -> (String, bool, Option<String>) {
let mut config_path = "config.toml".to_string();
let mut silent = false;
let mut log_level: Option<String> = None;
let args: Vec<String> = std::env::args().skip(1).collect();
// Check for --init first (handled before tokio)
if let Some(init_opts) = cli::parse_init_args(&args) {
if let Err(e) = cli::run_init(init_opts) {
eprintln!("[telemt] Init failed: {}", e);
std::process::exit(1);
}
std::process::exit(0);
}
let mut i = 0;
while i < args.len() {
match args[i].as_str() {
"--silent" | "-s" => { silent = true; }
"--log-level" => {
i += 1;
if i < args.len() { log_level = Some(args[i].clone()); }
}
s if s.starts_with("--log-level=") => {
log_level = Some(s.trim_start_matches("--log-level=").to_string());
}
"--help" | "-h" => {
eprintln!("Usage: telemt [config.toml] [OPTIONS]");
eprintln!();
eprintln!("Options:");
eprintln!(" --silent, -s Suppress info logs");
eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
eprintln!(" --help, -h Show this help");
eprintln!();
eprintln!("Setup (fire-and-forget):");
eprintln!(" --init Generate config, install systemd service, start");
eprintln!(" --port <PORT> Listen port (default: 443)");
eprintln!(" --domain <DOMAIN> TLS domain for masking (default: www.google.com)");
eprintln!(" --secret <HEX> 32-char hex secret (auto-generated if omitted)");
eprintln!(" --user <NAME> Username (default: user)");
eprintln!(" --config-dir <DIR> Config directory (default: /etc/telemt)");
eprintln!(" --no-start Don't start the service after install");
std::process::exit(0);
}
s if !s.starts_with('-') => { config_path = s.to_string(); }
other => { eprintln!("Unknown option: {}", other); }
}
i += 1;
}
(config_path, silent, log_level)
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let (config_path, cli_silent, cli_log_level) = parse_cli();
let config = match ProxyConfig::load(&config_path) {
Ok(c) => c,
Err(e) => {
if std::path::Path::new(&config_path).exists() {
eprintln!("[telemt] Error: {}", e);
std::process::exit(1);
} else {
let default = ProxyConfig::default();
std::fs::write(&config_path, toml::to_string_pretty(&default).unwrap()).unwrap();
eprintln!("[telemt] Created default config at {}", config_path);
default
}
}
};
if let Err(e) = config.validate() {
eprintln!("[telemt] Invalid config: {}", e);
std::process::exit(1);
}
let has_rust_log = std::env::var("RUST_LOG").is_ok();
let effective_log_level = if cli_silent {
LogLevel::Silent
} else if let Some(ref s) = cli_log_level {
LogLevel::from_str_loose(s)
} else {
config.general.log_level.clone()
};
// Start with INFO so startup messages are always visible,
// then switch to user-configured level after startup
let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info"));
tracing_subscriber::registry()
.with(filter_layer)
.with(fmt::Layer::default())
.init();
info!("Telemt MTProxy v{}", env!("CARGO_PKG_VERSION"));
info!("Log level: {}", effective_log_level);
info!("Modes: classic={} secure={} tls={}",
config.general.modes.classic,
config.general.modes.secure,
config.general.modes.tls);
info!("TLS domain: {}", config.censorship.tls_domain);
if let Some(ref sock) = config.censorship.mask_unix_sock {
info!("Mask: {} -> unix:{}", config.censorship.mask, sock);
if !std::path::Path::new(sock).exists() {
warn!("Unix socket '{}' does not exist yet. Masking will fail until it appears.", sock);
}
} else {
info!("Mask: {} -> {}:{}",
config.censorship.mask,
config.censorship.mask_host.as_deref().unwrap_or(&config.censorship.tls_domain),
config.censorship.mask_port);
}
if config.censorship.tls_domain == "www.google.com" {
warn!("Using default tls_domain. Consider setting a custom domain.");
}
// Connection concurrency limit — prevents OOM under SYN flood / connection storm.
// 10000 is generous; each connection uses ~64KB (2x 16KB relay buffers + overhead).
// 10000 connections ≈ 640MB peak memory.
let max_connections = Arc::new(Semaphore::new(10_000));
let prefer_ipv6 = config.general.prefer_ipv6;
let use_middle_proxy = config.general.use_middle_proxy;
let config = Arc::new(config);
let stats = Arc::new(Stats::new());
let rng = Arc::new(SecureRandom::new());
let replay_checker = Arc::new(ReplayChecker::new(
config.access.replay_check_len,
Duration::from_secs(config.access.replay_window_secs),
));
let upstream_manager = Arc::new(UpstreamManager::new(config.upstreams.clone()));
let buffer_pool = Arc::new(BufferPool::with_config(16 * 1024, 4096));
// Connection concurrency limit
let _max_connections = Arc::new(Semaphore::new(10_000));
// =====================================================================
// Middle Proxy initialization (if enabled)
// =====================================================================
let me_pool: Option<Arc<MePool>> = if use_middle_proxy {
info!("=== Middle Proxy Mode ===");
// ad_tag (proxy_tag) for advertising
let proxy_tag = config.general.ad_tag.as_ref().map(|tag| {
hex::decode(tag).unwrap_or_else(|_| {
warn!("Invalid ad_tag hex, middle proxy ad_tag will be empty");
Vec::new()
})
});
// =============================================================
// CRITICAL: Download Telegram proxy-secret (NOT user secret!)
//
// C MTProxy uses TWO separate secrets:
// -S flag = 16-byte user secret for client obfuscation
// --aes-pwd = 32-512 byte binary file for ME RPC auth
//
// proxy-secret is from: https://core.telegram.org/getProxySecret
// =============================================================
let proxy_secret_path = config.general.proxy_secret_path.as_deref();
match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).await {
Ok(proxy_secret) => {
info!(
secret_len = proxy_secret.len(),
key_sig = format_args!("0x{:08x}",
if proxy_secret.len() >= 4 {
u32::from_le_bytes([proxy_secret[0], proxy_secret[1],
proxy_secret[2], proxy_secret[3]])
} else { 0 }),
"Proxy-secret loaded"
);
let pool = MePool::new(proxy_tag, proxy_secret);
match pool.init(2, &rng).await {
Ok(()) => {
info!("Middle-End pool initialized successfully");
// Phase 4: Start health monitor
let pool_clone = pool.clone();
let rng_clone = rng.clone();
tokio::spawn(async move {
crate::transport::middle_proxy::me_health_monitor(
pool_clone, rng_clone, 2,
).await;
});
Some(pool)
}
Err(e) => {
error!(error = %e, "Failed to initialize ME pool. Falling back to direct mode.");
None
}
}
}
Err(e) => {
error!(error = %e, "Failed to fetch proxy-secret. Falling back to direct mode.");
None
}
}
} else {
None
};
if me_pool.is_some() {
info!("Transport: Middle Proxy (supports all DCs including CDN)");
} else {
info!("Transport: Direct TCP (standard DCs only)");
}
// Startup DC ping (only meaningful in direct mode)
if me_pool.is_none() {
info!("=== Telegram DC Connectivity ===");
let ping_results = upstream_manager.ping_all_dcs(prefer_ipv6).await;
for upstream_result in &ping_results {
info!(" via {}", upstream_result.upstream_name);
for dc in &upstream_result.results {
match (&dc.rtt_ms, &dc.error) {
(Some(rtt), _) => {
info!(" DC{} ({:>21}): {:.0}ms", dc.dc_idx, dc.dc_addr, rtt);
}
(None, Some(err)) => {
info!(" DC{} ({:>21}): FAIL ({})", dc.dc_idx, dc.dc_addr, err);
}
_ => {
info!(" DC{} ({:>21}): FAIL", dc.dc_idx, dc.dc_addr);
}
}
}
}
info!("================================");
}
// Background tasks
let um_clone = upstream_manager.clone();
tokio::spawn(async move { um_clone.run_health_checks(prefer_ipv6).await; });
let rc_clone = replay_checker.clone();
tokio::spawn(async move { rc_clone.run_periodic_cleanup().await; });
let detected_ip = detect_ip().await;
debug!("Detected IPs: v4={:?} v6={:?}", detected_ip.ipv4, detected_ip.ipv6);
let mut listeners = Vec::new();
for listener_conf in &config.server.listeners {
let addr = SocketAddr::new(listener_conf.ip, config.server.port);
let options = ListenOptions {
ipv6_only: listener_conf.ip.is_ipv6(),
..Default::default()
};
match create_listener(addr, &options) {
Ok(socket) => {
let listener = TcpListener::from_std(socket.into())?;
info!("Listening on {}", addr);
let public_ip = if let Some(ip) = listener_conf.announce_ip {
ip
} else if listener_conf.ip.is_unspecified() {
if listener_conf.ip.is_ipv4() {
detected_ip.ipv4.unwrap_or(listener_conf.ip)
} else {
detected_ip.ipv6.unwrap_or(listener_conf.ip)
}
} else {
listener_conf.ip
};
if !config.show_link.is_empty() {
info!("--- Proxy Links ({}) ---", public_ip);
for user_name in &config.show_link {
if let Some(secret) = config.access.users.get(user_name) {
info!("User: {}", user_name);
if config.general.modes.classic {
info!(" Classic: tg://proxy?server={}&port={}&secret={}",
public_ip, config.server.port, secret);
}
if config.general.modes.secure {
info!(" DD: tg://proxy?server={}&port={}&secret=dd{}",
public_ip, config.server.port, secret);
}
if config.general.modes.tls {
let domain_hex = hex::encode(&config.censorship.tls_domain);
info!(" EE-TLS: tg://proxy?server={}&port={}&secret=ee{}{}",
public_ip, config.server.port, secret, domain_hex);
}
} else {
warn!("User '{}' in show_link not found", user_name);
}
}
info!("------------------------");
}
listeners.push(listener);
},
Err(e) => {
error!("Failed to bind to {}: {}", addr, e);
}
}
}
if listeners.is_empty() {
error!("No listeners. Exiting.");
std::process::exit(1);
}
// Switch to user-configured log level after startup
let runtime_filter = if has_rust_log {
EnvFilter::from_default_env()
} else {
EnvFilter::new(effective_log_level.to_filter_str())
};
filter_handle.reload(runtime_filter).expect("Failed to switch log filter");
for listener in listeners {
let config = config.clone();
let stats = stats.clone();
let upstream_manager = upstream_manager.clone();
let replay_checker = replay_checker.clone();
let buffer_pool = buffer_pool.clone();
let rng = rng.clone();
let me_pool = me_pool.clone();
tokio::spawn(async move {
loop {
match listener.accept().await {
Ok((stream, peer_addr)) => {
let config = config.clone();
let stats = stats.clone();
let upstream_manager = upstream_manager.clone();
let replay_checker = replay_checker.clone();
let buffer_pool = buffer_pool.clone();
let rng = rng.clone();
let me_pool = me_pool.clone();
tokio::spawn(async move {
if let Err(e) = ClientHandler::new(
stream, peer_addr, config, stats,
upstream_manager, replay_checker, buffer_pool, rng,
me_pool,
).run().await {
debug!(peer = %peer_addr, error = %e, "Connection error");
}
});
}
Err(e) => {
error!("Accept error: {}", e);
tokio::time::sleep(Duration::from_millis(100)).await;
}
}
}
});
}
match signal::ctrl_c().await {
Ok(()) => info!("Shutting down..."),
Err(e) => error!("Signal error: {}", e),
}
Ok(())
}

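Note that the new main.rs only constructs `_max_connections` and never acquires it; the old sizing comment above explains the intended budget. A sketch of how the semaphore could gate the accept loop, assuming each permit is held by the per-connection task (this is not what the commit does, only an illustration):

// Sketch only: gating accepts with a connection-limit semaphore.
async fn accept_gated(
    listener: &tokio::net::TcpListener,
    limit: std::sync::Arc<tokio::sync::Semaphore>,
) -> std::io::Result<()> {
    loop {
        // Wait for a free slot before accepting, so a SYN flood cannot spawn unbounded tasks.
        let permit = limit.clone().acquire_owned().await.expect("semaphore closed");
        let (stream, peer) = listener.accept().await?;
        tokio::spawn(async move {
            let _permit = permit; // slot is released when this connection task ends
            let _ = (stream, peer); // ... hand off to ClientHandler::new(...).run() here ...
        });
    }
}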

@@ -1,403 +1,583 @@
//! Client Handler
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::TcpStream;
use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
use tokio::time::timeout;
use tracing::{debug, info, warn, error, trace};
use crate::config::ProxyConfig;
use crate::error::{ProxyError, Result, HandshakeResult};
use crate::protocol::constants::*;
use crate::protocol::tls;
use crate::stats::{Stats, ReplayChecker};
use crate::transport::{configure_client_socket, UpstreamManager};
use crate::transport::middle_proxy::{MePool, MeResponse};
use crate::stream::{CryptoReader, CryptoWriter, FakeTlsReader, FakeTlsWriter, BufferPool};
use crate::crypto::{AesCtr, SecureRandom};
use crate::proxy::handshake::{
handle_tls_handshake, handle_mtproto_handshake,
HandshakeSuccess, generate_tg_nonce, encrypt_tg_nonce,
};
use crate::proxy::relay::relay_bidirectional;
use crate::proxy::masking::handle_bad_client;
pub struct ClientHandler;
pub struct RunningClientHandler {
stream: TcpStream,
peer: SocketAddr,
config: Arc<ProxyConfig>,
stats: Arc<Stats>,
replay_checker: Arc<ReplayChecker>,
upstream_manager: Arc<UpstreamManager>,
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
}
impl ClientHandler {
pub fn new(
stream: TcpStream,
peer: SocketAddr,
config: Arc<ProxyConfig>,
stats: Arc<Stats>,
upstream_manager: Arc<UpstreamManager>,
replay_checker: Arc<ReplayChecker>,
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
) -> RunningClientHandler {
RunningClientHandler {
stream, peer, config, stats, replay_checker,
upstream_manager, buffer_pool, rng,
}
}
}
impl RunningClientHandler {
pub async fn run(mut self) -> Result<()> {
self.stats.increment_connects_all();
let peer = self.peer;
debug!(peer = %peer, "New connection");
if let Err(e) = configure_client_socket(
&self.stream,
self.config.timeouts.client_keepalive,
self.config.timeouts.client_ack,
) {
debug!(peer = %peer, error = %e, "Failed to configure client socket");
}
let handshake_timeout = Duration::from_secs(self.config.timeouts.client_handshake);
let stats = self.stats.clone();
let result = timeout(handshake_timeout, self.do_handshake()).await;
match result {
Ok(Ok(())) => {
debug!(peer = %peer, "Connection handled successfully");
Ok(())
}
Ok(Err(e)) => {
debug!(peer = %peer, error = %e, "Handshake failed");
Err(e)
}
Err(_) => {
stats.increment_handshake_timeouts();
debug!(peer = %peer, "Handshake timeout");
Err(ProxyError::TgHandshakeTimeout)
}
}
}
async fn do_handshake(mut self) -> Result<()> {
let mut first_bytes = [0u8; 5];
self.stream.read_exact(&mut first_bytes).await?;
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
let peer = self.peer;
debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
if is_tls {
self.handle_tls_client(first_bytes).await
} else {
self.handle_direct_client(first_bytes).await
}
}
async fn handle_tls_client(mut self, first_bytes: [u8; 5]) -> Result<()> {
let peer = self.peer;
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
debug!(peer = %peer, tls_len = tls_len, "Reading TLS handshake");
if tls_len < 512 {
debug!(peer = %peer, tls_len = tls_len, "TLS handshake too short");
self.stats.increment_connects_bad();
let (reader, writer) = self.stream.into_split();
handle_bad_client(reader, writer, &first_bytes, &self.config).await;
return Ok(());
}
let mut handshake = vec![0u8; 5 + tls_len];
handshake[..5].copy_from_slice(&first_bytes);
self.stream.read_exact(&mut handshake[5..]).await?;
let config = self.config.clone();
let replay_checker = self.replay_checker.clone();
let stats = self.stats.clone();
let buffer_pool = self.buffer_pool.clone();
let (read_half, write_half) = self.stream.into_split();
let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake(
&handshake, read_half, write_half, peer,
&config, &replay_checker, &self.rng,
).await {
HandshakeResult::Success(result) => result,
HandshakeResult::BadClient { reader, writer } => {
stats.increment_connects_bad();
handle_bad_client(reader, writer, &handshake, &config).await;
return Ok(());
}
HandshakeResult::Error(e) => return Err(e),
};
debug!(peer = %peer, "Reading MTProto handshake through TLS");
let mtproto_data = tls_reader.read_exact(HANDSHAKE_LEN).await?;
let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into()
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
&mtproto_handshake, tls_reader, tls_writer, peer,
&config, &replay_checker, true,
).await {
HandshakeResult::Success(result) => result,
HandshakeResult::BadClient { reader: _, writer: _ } => {
stats.increment_connects_bad();
debug!(peer = %peer, "Valid TLS but invalid MTProto handshake");
return Ok(());
}
HandshakeResult::Error(e) => return Err(e),
};
Self::handle_authenticated_static(
crypto_reader, crypto_writer, success,
self.upstream_manager, self.stats, self.config,
buffer_pool, self.rng,
).await
}
async fn handle_direct_client(mut self, first_bytes: [u8; 5]) -> Result<()> {
let peer = self.peer;
if !self.config.general.modes.classic && !self.config.general.modes.secure {
debug!(peer = %peer, "Non-TLS modes disabled");
self.stats.increment_connects_bad();
let (reader, writer) = self.stream.into_split();
handle_bad_client(reader, writer, &first_bytes, &self.config).await;
return Ok(());
}
let mut handshake = [0u8; HANDSHAKE_LEN];
handshake[..5].copy_from_slice(&first_bytes);
self.stream.read_exact(&mut handshake[5..]).await?;
let config = self.config.clone();
let replay_checker = self.replay_checker.clone();
let stats = self.stats.clone();
let buffer_pool = self.buffer_pool.clone();
let (read_half, write_half) = self.stream.into_split();
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
&handshake, read_half, write_half, peer,
&config, &replay_checker, false,
).await {
HandshakeResult::Success(result) => result,
HandshakeResult::BadClient { reader, writer } => {
stats.increment_connects_bad();
handle_bad_client(reader, writer, &handshake, &config).await;
return Ok(());
}
HandshakeResult::Error(e) => return Err(e),
};
Self::handle_authenticated_static(
crypto_reader, crypto_writer, success,
self.upstream_manager, self.stats, self.config,
buffer_pool, self.rng,
).await
}
async fn handle_authenticated_static<R, W>(
client_reader: CryptoReader<R>,
client_writer: CryptoWriter<W>,
success: HandshakeSuccess,
upstream_manager: Arc<UpstreamManager>, upstream_manager: Arc<UpstreamManager>,
stats: Arc<Stats>,
config: Arc<ProxyConfig>,
buffer_pool: Arc<BufferPool>, buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>, rng: Arc<SecureRandom>,
) -> Result<()> me_pool: Option<Arc<MePool>>,
where
R: AsyncRead + Unpin + Send + 'static,
W: AsyncWrite + Unpin + Send + 'static,
{
let user = &success.user;
if let Err(e) = Self::check_user_limits_static(user, &config, &stats) {
warn!(user = %user, error = %e, "User limit exceeded");
return Err(e);
}
let dc_addr = Self::get_dc_addr_static(success.dc_idx, &config)?;
info!(
user = %user,
peer = %success.peer,
dc = success.dc_idx,
dc_addr = %dc_addr,
proto = ?success.proto_tag,
"Connecting to Telegram"
);
// Pass dc_idx for latency-based upstream selection
let tg_stream = upstream_manager.connect(dc_addr, Some(success.dc_idx)).await?;
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
let (tg_reader, tg_writer) = Self::do_tg_handshake_static(
tg_stream, &success, &config, rng.as_ref(),
).await?;
debug!(peer = %success.peer, "TG handshake complete, starting relay");
stats.increment_user_connects(user);
stats.increment_user_curr_connects(user);
let relay_result = relay_bidirectional(
client_reader, client_writer,
tg_reader, tg_writer,
user, Arc::clone(&stats), buffer_pool,
).await;
stats.decrement_user_curr_connects(user);
match &relay_result {
Ok(()) => debug!(user = %user, "Relay completed"),
Err(e) => debug!(user = %user, error = %e, "Relay ended with error"),
}
relay_result
}
}
impl ClientHandler {
pub fn new(
stream: TcpStream,
peer: SocketAddr,
config: Arc<ProxyConfig>,
stats: Arc<Stats>,
upstream_manager: Arc<UpstreamManager>,
replay_checker: Arc<ReplayChecker>,
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
me_pool: Option<Arc<MePool>>,
) -> RunningClientHandler {
RunningClientHandler {
stream, peer, config, stats, replay_checker,
upstream_manager, buffer_pool, rng, me_pool,
}
}
}
/// Resolve DC index to a target address.
///
/// Matches the C implementation's behavior exactly:
///
/// 1. Look up DC in known clusters (standard DCs ±1..±5)
/// 2. If not found and `force=1` → fall back to `default_cluster`
///
/// In the C code:
/// - `proxy-multi.conf` is downloaded from Telegram, contains only DC ±1..±5
/// - `default 2;` directive sets the default cluster
/// - `mf_cluster_lookup(CurConf, target_dc, 1)` returns default_cluster
/// for any unknown DC (like CDN DC 203)
///
/// So DC 203, DC 101, DC -300, etc. all route to the default DC (2).
/// There is NO modular arithmetic in the C implementation.
fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
let datacenters = if config.general.prefer_ipv6 {
&*TG_DATACENTERS_V6
} else {
&*TG_DATACENTERS_V4
};
let num_dcs = datacenters.len(); // 5
// === Step 1: Check dc_overrides (like C's `proxy_for <dc> <ip>:<port>`) ===
let dc_key = dc_idx.to_string();
if let Some(addr_str) = config.dc_overrides.get(&dc_key) {
match addr_str.parse::<SocketAddr>() {
Ok(addr) => {
debug!(dc_idx = dc_idx, addr = %addr, "Using DC override from config");
return Ok(addr);
}
Err(_) => {
warn!(dc_idx = dc_idx, addr_str = %addr_str,
"Invalid DC override address in config, ignoring");
}
}
}
// === Step 2: Standard DCs ±1..±5 — direct lookup ===
let abs_dc = dc_idx.unsigned_abs() as usize;
if abs_dc >= 1 && abs_dc <= num_dcs {
return Ok(SocketAddr::new(datacenters[abs_dc - 1], TG_DATACENTER_PORT));
}
// === Step 3: Unknown DC — fall back to default_cluster ===
// Exactly like C's `mf_cluster_lookup(CurConf, target_dc, force=1)`
// which returns `MC->default_cluster` when the DC is not found.
// Telegram's proxy-multi.conf uses `default 2;`
let default_dc = config.default_dc.unwrap_or(2) as usize;
let fallback_idx = if default_dc >= 1 && default_dc <= num_dcs {
default_dc - 1
} else {
1 // DC 2 (index 1) — matches Telegram's `default 2;`
};
info!(
original_dc = dc_idx,
fallback_dc = (fallback_idx + 1) as u16,
fallback_addr = %datacenters[fallback_idx],
"Special DC ---> default_cluster"
);
Ok(SocketAddr::new(datacenters[fallback_idx], TG_DATACENTER_PORT))
}
impl RunningClientHandler {
pub async fn run(mut self) -> Result<()> {
self.stats.increment_connects_all();
let peer = self.peer;
debug!(peer = %peer, "New connection");
if let Err(e) = configure_client_socket(
&self.stream,
self.config.timeouts.client_keepalive,
self.config.timeouts.client_ack,
) {
debug!(peer = %peer, error = %e, "Failed to configure client socket");
}
let handshake_timeout = Duration::from_secs(self.config.timeouts.client_handshake);
let stats = self.stats.clone();
let result = timeout(handshake_timeout, self.do_handshake()).await;
match result {
Ok(Ok(())) => {
debug!(peer = %peer, "Connection handled successfully");
Ok(())
}
Ok(Err(e)) => {
debug!(peer = %peer, error = %e, "Handshake failed");
Err(e)
}
Err(_) => {
stats.increment_handshake_timeouts();
debug!(peer = %peer, "Handshake timeout");
Err(ProxyError::TgHandshakeTimeout)
}
}
}
async fn do_handshake(mut self) -> Result<()> {
let mut first_bytes = [0u8; 5];
self.stream.read_exact(&mut first_bytes).await?;
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
let peer = self.peer;
debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
if is_tls {
self.handle_tls_client(first_bytes).await
} else {
self.handle_direct_client(first_bytes).await
}
}
async fn handle_tls_client(mut self, first_bytes: [u8; 5]) -> Result<()> {
let peer = self.peer;
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
debug!(peer = %peer, tls_len = tls_len, "Reading TLS handshake");
if tls_len < 512 {
debug!(peer = %peer, tls_len = tls_len, "TLS handshake too short");
self.stats.increment_connects_bad();
let (reader, writer) = self.stream.into_split();
handle_bad_client(reader, writer, &first_bytes, &self.config).await;
return Ok(());
}
let mut handshake = vec![0u8; 5 + tls_len];
handshake[..5].copy_from_slice(&first_bytes);
self.stream.read_exact(&mut handshake[5..]).await?;
let config = self.config.clone();
let replay_checker = self.replay_checker.clone();
let stats = self.stats.clone();
let buffer_pool = self.buffer_pool.clone();
let (read_half, write_half) = self.stream.into_split();
let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake(
&handshake, read_half, write_half, peer,
&config, &replay_checker, &self.rng,
).await {
HandshakeResult::Success(result) => result,
HandshakeResult::BadClient { reader, writer } => {
stats.increment_connects_bad();
handle_bad_client(reader, writer, &handshake, &config).await;
return Ok(());
}
HandshakeResult::Error(e) => return Err(e),
};
debug!(peer = %peer, "Reading MTProto handshake through TLS");
let mtproto_data = tls_reader.read_exact(HANDSHAKE_LEN).await?;
let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into()
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
&mtproto_handshake, tls_reader, tls_writer, peer,
&config, &replay_checker, true,
).await {
HandshakeResult::Success(result) => result,
HandshakeResult::BadClient { reader: _, writer: _ } => {
stats.increment_connects_bad();
debug!(peer = %peer, "Valid TLS but invalid MTProto handshake");
return Ok(());
}
HandshakeResult::Error(e) => return Err(e),
};
Self::handle_authenticated_static(
crypto_reader, crypto_writer, success,
self.upstream_manager, self.stats, self.config,
buffer_pool, self.rng, self.me_pool,
).await
}
async fn handle_direct_client(mut self, first_bytes: [u8; 5]) -> Result<()> {
let peer = self.peer;
if !self.config.general.modes.classic && !self.config.general.modes.secure {
debug!(peer = %peer, "Non-TLS modes disabled");
self.stats.increment_connects_bad();
let (reader, writer) = self.stream.into_split();
handle_bad_client(reader, writer, &first_bytes, &self.config).await;
return Ok(());
}
let mut handshake = [0u8; HANDSHAKE_LEN];
handshake[..5].copy_from_slice(&first_bytes);
self.stream.read_exact(&mut handshake[5..]).await?;
let config = self.config.clone();
let replay_checker = self.replay_checker.clone();
let stats = self.stats.clone();
let buffer_pool = self.buffer_pool.clone();
let (read_half, write_half) = self.stream.into_split();
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
&handshake, read_half, write_half, peer,
&config, &replay_checker, false,
).await {
HandshakeResult::Success(result) => result,
HandshakeResult::BadClient { reader, writer } => {
stats.increment_connects_bad();
handle_bad_client(reader, writer, &handshake, &config).await;
return Ok(());
}
HandshakeResult::Error(e) => return Err(e),
};
Self::handle_authenticated_static(
crypto_reader, crypto_writer, success,
self.upstream_manager, self.stats, self.config,
buffer_pool, self.rng, self.me_pool,
).await
}
/// Main dispatch after successful handshake.
/// Two modes:
/// - Direct: TCP relay to TG DC (existing behavior)
/// - Middle Proxy: RPC multiplex through ME pool (new — supports CDN DCs)
async fn handle_authenticated_static<R, W>(
client_reader: CryptoReader<R>,
client_writer: CryptoWriter<W>,
success: HandshakeSuccess,
upstream_manager: Arc<UpstreamManager>,
stats: Arc<Stats>,
config: Arc<ProxyConfig>,
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
me_pool: Option<Arc<MePool>>,
) -> Result<()>
where
R: AsyncRead + Unpin + Send + 'static,
W: AsyncWrite + Unpin + Send + 'static,
{
let user = &success.user;
if let Err(e) = Self::check_user_limits_static(user, &config, &stats) {
warn!(user = %user, error = %e, "User limit exceeded");
return Err(e);
}
// Decide: middle proxy or direct
if config.general.use_middle_proxy {
if let Some(ref pool) = me_pool {
return Self::handle_via_middle_proxy(
client_reader, client_writer, success,
pool.clone(), stats, config, buffer_pool,
).await;
}
warn!("use_middle_proxy=true but MePool not initialized, falling back to direct");
}
// Direct mode (original behavior)
Self::handle_via_direct(
client_reader, client_writer, success,
upstream_manager, stats, config, buffer_pool, rng,
).await
}
// =====================================================================
// Direct mode — TCP relay to Telegram DC
// =====================================================================
async fn handle_via_direct<R, W>(
client_reader: CryptoReader<R>,
client_writer: CryptoWriter<W>,
success: HandshakeSuccess,
upstream_manager: Arc<UpstreamManager>,
stats: Arc<Stats>,
config: Arc<ProxyConfig>,
buffer_pool: Arc<BufferPool>,
rng: Arc<SecureRandom>,
) -> Result<()>
where
R: AsyncRead + Unpin + Send + 'static,
W: AsyncWrite + Unpin + Send + 'static,
{
let user = &success.user;
let dc_addr = Self::get_dc_addr_static(success.dc_idx, &config)?;
info!(
user = %user,
peer = %success.peer,
dc = success.dc_idx,
dc_addr = %dc_addr,
proto = ?success.proto_tag,
mode = "direct",
"Connecting to Telegram DC"
);
let tg_stream = upstream_manager.connect(dc_addr, Some(success.dc_idx)).await?;
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
let (tg_reader, tg_writer) = Self::do_tg_handshake_static(
tg_stream, &success, &config, rng.as_ref(),
).await?;
debug!(peer = %success.peer, "TG handshake complete, starting relay");
stats.increment_user_connects(user);
stats.increment_user_curr_connects(user);
let relay_result = relay_bidirectional(
client_reader, client_writer,
tg_reader, tg_writer,
user, Arc::clone(&stats), buffer_pool,
).await;
stats.decrement_user_curr_connects(user);
match &relay_result {
Ok(()) => debug!(user = %user, "Direct relay completed"),
Err(e) => debug!(user = %user, error = %e, "Direct relay ended with error"),
}
relay_result
}
// =====================================================================
// Middle Proxy mode — RPC multiplex through ME pool
// =====================================================================
/// Middle Proxy RPC relay
///
/// Architecture (matches C MTProxy):
/// ```text
/// Client ←AES-CTR→ [telemt] ←RPC/AES-CBC→ ME ←internal→ DC (any, incl CDN 203)
/// ```
///
/// Key difference from direct mode:
/// - No per-client TCP to DC; all clients share ME pool connections
/// - ME internally routes to correct DC based on client's encrypted auth_key_id
/// - CDN DCs (203+) work because ME knows their internal addresses
/// - We pass raw client MTProto bytes in RPC_PROXY_REQ envelope
/// - ME returns responses in RPC_PROXY_ANS envelope
async fn handle_via_middle_proxy<R, W>(
mut client_reader: CryptoReader<R>,
mut client_writer: CryptoWriter<W>,
success: HandshakeSuccess,
me_pool: Arc<MePool>,
stats: Arc<Stats>,
config: Arc<ProxyConfig>,
_buffer_pool: Arc<BufferPool>,
) -> Result<()>
where
R: AsyncRead + Unpin + Send + 'static,
W: AsyncWrite + Unpin + Send + 'static,
{
let user = success.user.clone();
let peer = success.peer;
info!(
user = %user,
peer = %peer,
dc = success.dc_idx,
proto = ?success.proto_tag,
mode = "middle_proxy",
"Routing via Middle-End"
);
// Register this client connection in ME demux registry
let (conn_id, mut me_rx) = me_pool.registry().register().await;
// Our listening address for RPC_PROXY_REQ metadata
let our_addr: SocketAddr = format!("0.0.0.0:{}", config.server.port)
.parse().unwrap_or_else(|_| "0.0.0.0:443".parse().unwrap());
stats.increment_user_connects(&user);
stats.increment_user_curr_connects(&user);
debug!(user = %user, conn_id, "ME relay started");
// Bidirectional relay loop: client ↔ ME pool
//
// C→S direction: read raw bytes from client_reader, wrap in RPC_PROXY_REQ, send via ME
// S→C direction: receive MeResponse::Data from registry channel, write to client_writer
//
// We use tokio::select! to handle both directions concurrently.
// Unlike direct mode (copy_bidirectional on two TCP streams),
// here one side is a channel (mpsc::Receiver), not a stream.
let mut client_buf = vec![0u8; 64 * 1024];
let mut client_closed = false;
let mut server_closed = false;
let result: Result<()> = loop {
tokio::select! {
// C→S: client sends data, we forward to ME
read_result = client_reader.read(&mut client_buf), if !client_closed => {
match read_result {
Ok(0) => {
debug!(conn_id, "Client EOF");
client_closed = true;
if server_closed { break Ok(()); }
// Signal ME to close this connection
let _ = me_pool.send_close(conn_id).await;
}
Ok(n) => {
trace!(conn_id, bytes = n, "C→ME");
stats.add_user_octets_from(&user, n as u64);
if let Err(e) = me_pool.send_proxy_req(
conn_id, peer, our_addr, &client_buf[..n]
).await {
break Err(e);
}
}
Err(e) => {
debug!(conn_id, error = %e, "Client read error");
break Err(ProxyError::Io(e));
}
}
}
// S→C: ME sends response, we forward to client
me_msg = me_rx.recv(), if !server_closed => {
match me_msg {
Some(MeResponse::Data(data)) => {
trace!(conn_id, bytes = data.len(), "ME→C");
stats.add_user_octets_to(&user, data.len() as u64);
if let Err(e) = client_writer.write_all(&data).await {
debug!(conn_id, error = %e, "Client write error");
break Err(ProxyError::Io(e));
}
if let Err(e) = client_writer.flush().await {
break Err(ProxyError::Io(e));
}
}
Some(MeResponse::Ack(_token)) => {
// QuickACK from ME — could forward to client as obfuscated ACK
// For now, just log
trace!(conn_id, "ME ACK (ignored)");
}
Some(MeResponse::Close) => {
debug!(conn_id, "ME sent CLOSE");
server_closed = true;
if client_closed { break Ok(()); }
}
None => {
// Channel closed — ME connection died
debug!(conn_id, "ME channel closed");
server_closed = true;
if client_closed { break Ok(()); }
break Err(ProxyError::Proxy("ME connection lost".into()));
}
}
}
// Both sides closed
else => {
break Ok(());
}
}
};
// Cleanup
debug!(user = %user, conn_id, "ME relay cleanup");
me_pool.registry().unregister(conn_id).await;
stats.decrement_user_curr_connects(&user);
match &result {
Ok(()) => debug!(user = %user, conn_id, "ME relay completed"),
Err(e) => debug!(user = %user, conn_id, error = %e, "ME relay error"),
}
result
}
// =====================================================================
// Helpers
// =====================================================================
fn check_user_limits_static(user: &str, config: &ProxyConfig, stats: &Stats) -> Result<()> {
if let Some(expiration) = config.access.user_expirations.get(user) {
if chrono::Utc::now() > *expiration {
return Err(ProxyError::UserExpired { user: user.to_string() });
}
}
if let Some(limit) = config.access.user_max_tcp_conns.get(user) {
if stats.get_user_curr_connects(user) >= *limit as u64 {
return Err(ProxyError::ConnectionLimitExceeded { user: user.to_string() });
}
}
if let Some(quota) = config.access.user_data_quota.get(user) {
if stats.get_user_total_octets(user) >= *quota {
return Err(ProxyError::DataQuotaExceeded { user: user.to_string() });
}
}
Ok(())
}
/// Resolve DC index to target address (used only in direct mode)
fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
let datacenters = if config.general.prefer_ipv6 {
&*TG_DATACENTERS_V6
} else {
&*TG_DATACENTERS_V4
};
let num_dcs = datacenters.len();
let dc_key = dc_idx.to_string();
if let Some(addr_str) = config.dc_overrides.get(&dc_key) {
match addr_str.parse::<SocketAddr>() {
Ok(addr) => {
debug!(dc_idx = dc_idx, addr = %addr, "Using DC override from config");
return Ok(addr);
}
Err(_) => {
warn!(dc_idx = dc_idx, addr_str = %addr_str,
"Invalid DC override address in config, ignoring");
}
}
}
let abs_dc = dc_idx.unsigned_abs() as usize;
if abs_dc >= 1 && abs_dc <= num_dcs {
return Ok(SocketAddr::new(datacenters[abs_dc - 1], TG_DATACENTER_PORT));
}
let default_dc = config.default_dc.unwrap_or(2) as usize;
let fallback_idx = if default_dc >= 1 && default_dc <= num_dcs {
default_dc - 1
} else {
1
};
info!(
original_dc = dc_idx,
fallback_dc = (fallback_idx + 1) as u16,
fallback_addr = %datacenters[fallback_idx],
"Special DC ---> default_cluster"
);
Ok(SocketAddr::new(datacenters[fallback_idx], TG_DATACENTER_PORT))
}
/// Perform obfuscated handshake with Telegram DC (direct mode only)
async fn do_tg_handshake_static(
mut stream: TcpStream,
success: &HandshakeSuccess,
config: &ProxyConfig,
rng: &SecureRandom,
) -> Result<(CryptoReader<tokio::net::tcp::OwnedReadHalf>, CryptoWriter<tokio::net::tcp::OwnedWriteHalf>)> {
let (nonce, tg_enc_key, tg_enc_iv, tg_dec_key, tg_dec_iv) = generate_tg_nonce(
success.proto_tag,
success.dc_idx,
&success.dec_key,
success.dec_iv,
rng,
config.general.fast_mode,
);
let encrypted_nonce = encrypt_tg_nonce(&nonce);
debug!(
peer = %success.peer,
nonce_head = %hex::encode(&nonce[..16]),
"Sending nonce to Telegram"
);
stream.write_all(&encrypted_nonce).await?;
stream.flush().await?;
let (read_half, write_half) = stream.into_split();
let decryptor = AesCtr::new(&tg_dec_key, tg_dec_iv);
let encryptor = AesCtr::new(&tg_enc_key, tg_enc_iv);
Ok((
CryptoReader::new(read_half, decryptor),
CryptoWriter::new(write_half, encryptor),
))
}
}
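handler.rs only consumes the ME demux registry (`register`, `unregister`) and the `MeResponse` variants; the registry itself lives in the suppressed `middle_proxy` diff. A minimal self-contained sketch of a registry with that surface, where the `u64` conn-id type, field names and channel depth are assumptions rather than the real code:

use std::collections::HashMap;
use tokio::sync::{mpsc, Mutex};

// Assumed shape of the per-connection messages, based on the variants matched in handler.rs.
pub enum MeResponse {
    Data(Vec<u8>), // RPC_PROXY_ANS payload for this client
    Ack(u64),      // QuickACK token from the middle-end
    Close,         // middle-end closed this client's virtual connection
}

// Hypothetical demux registry: maps a locally generated conn_id to the channel
// that the client-facing relay loop reads from.
pub struct ConnRegistry {
    next_id: Mutex<u64>,
    conns: Mutex<HashMap<u64, mpsc::Sender<MeResponse>>>,
}

impl ConnRegistry {
    pub async fn register(&self) -> (u64, mpsc::Receiver<MeResponse>) {
        let mut next = self.next_id.lock().await;
        *next += 1;
        let conn_id = *next;
        let (tx, rx) = mpsc::channel(64);
        self.conns.lock().await.insert(conn_id, tx);
        (conn_id, rx)
    }

    pub async fn unregister(&self, conn_id: u64) {
        self.conns.lock().await.remove(&conn_id);
    }

    // The ME reader task would call this when a response arrives for conn_id.
    pub async fn dispatch(&self, conn_id: u64, msg: MeResponse) {
        if let Some(tx) = self.conns.lock().await.get(&conn_id).cloned() {
            let _ = tx.send(msg).await;
        }
    }
}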

File diff suppressed because it is too large.
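Only the call sites of the suppressed file are visible here, e.g. `me_health_monitor(pool, rng, 2)` spawned from main.rs, where 2 matches the `pool.init(2, &rng)` target. A guess at the monitor's overall shape, with the 30-second interval and the `live`/`redial` hooks standing in for whatever MePool actually exposes:

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;
use tracing::{info, warn};

// Self-contained sketch: `live` stands in for however MePool tracks its healthy
// connection count, and `redial` for its reconnect entry point. Both are assumptions,
// not the crate's real API.
pub async fn me_health_monitor_sketch<F>(live: Arc<AtomicUsize>, target_conns: usize, redial: F)
where
    F: Fn(usize) + Send + 'static,
{
    let mut tick = tokio::time::interval(Duration::from_secs(30));
    loop {
        tick.tick().await;
        let healthy = live.load(Ordering::Relaxed);
        if healthy < target_conns {
            warn!(healthy, target = target_conns, "ME pool degraded, re-dialing");
            redial(target_conns - healthy); // real code would await a reconnect on MePool
        } else {
            info!(healthy, "ME pool healthy");
        }
    }
}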