IP Version Superfallback

Alexey
2026-02-14 00:26:07 +03:00
parent de28655dd2
commit 9b850b0bfb
4 changed files with 775 additions and 581 deletions

View File

@@ -1,35 +1,35 @@
//! telemt — Telegram MTProto Proxy
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::TcpListener;
use tokio::signal;
use tokio::sync::Semaphore;
use tracing::{info, error, warn, debug};
use tracing_subscriber::{fmt, EnvFilter, reload, prelude::*};
mod cli;
mod config;
mod crypto;
mod error;
mod protocol;
mod proxy;
mod stats;
mod stream;
mod transport;
mod util;
use crate::config::{ProxyConfig, LogLevel};
use crate::proxy::ClientHandler;
use crate::stats::{Stats, ReplayChecker};
use crate::crypto::SecureRandom;
use crate::transport::{create_listener, ListenOptions, UpstreamManager};
use crate::transport::middle_proxy::MePool;
use crate::util::ip::detect_ip;
use crate::stream::BufferPool;
fn parse_cli() -> (String, bool, Option<String>) {
let mut config_path = "config.toml".to_string();
let mut silent = false;
let mut log_level: Option<String> = None;
@@ -81,10 +81,10 @@
}
(config_path, silent, log_level)
}
#[tokio::main]
async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
let (config_path, cli_silent, cli_log_level) = parse_cli();
let config = match ProxyConfig::load(&config_path) {
@@ -238,25 +238,69 @@
// Startup DC ping (only meaningful in direct mode)
if me_pool.is_none() {
info!("=== Telegram DC Connectivity ===");
info!("================= Telegram DC Connectivity =================");
let ping_results = upstream_manager.ping_all_dcs(prefer_ipv6).await;
for upstream_result in &ping_results {
// Show which IP version is in use and which is fallback
if upstream_result.both_available {
if prefer_ipv6 {
info!(" IPv6 in use and IPv4 is fallback");
} else {
info!(" IPv4 in use and IPv6 is fallback");
}
} else {
let v6_works = upstream_result.v6_results.iter().any(|r| r.rtt_ms.is_some());
let v4_works = upstream_result.v4_results.iter().any(|r| r.rtt_ms.is_some());
if v6_works && !v4_works {
info!(" IPv6 only (IPv4 unavailable)");
} else if v4_works && !v6_works {
info!(" IPv4 only (IPv6 unavailable)");
} else if !v6_works && !v4_works {
info!(" No connectivity!");
}
}
info!(" via {}", upstream_result.upstream_name);
for dc in &upstream_result.results {
match (&dc.rtt_ms, &dc.error) {
(Some(rtt), _) => {
info!(" DC{} ({:>21}): {:.0}ms", dc.dc_idx, dc.dc_addr, rtt);
info!("============================================================");
// Print IPv6 results first
for dc in &upstream_result.v6_results {
let addr_str = dc.dc_addr.to_string(); // SocketAddr's Display brackets IPv6 as [addr]:port
match &dc.rtt_ms {
Some(rtt) => {
// Align: IPv6 addresses are longer, use fewer tabs
// [2001:b28:f23d:f001::a]:443 = ~28 chars
info!(" DC{} [IPv6] {}:\t\t{:.0} ms", dc.dc_idx, addr_str, rtt);
}
(None, Some(err)) => {
info!(" DC{} ({:>21}): FAIL ({})", dc.dc_idx, dc.dc_addr, err);
}
_ => {
info!(" DC{} ({:>21}): FAIL", dc.dc_idx, dc.dc_addr);
None => {
let err = dc.error.as_deref().unwrap_or("fail");
info!(" DC{} [IPv6] {}:\t\tFAIL ({})", dc.dc_idx, addr_str, err);
}
}
}
info!("============================================================");
// Print IPv4 results
for dc in &upstream_result.v4_results {
let addr_str = dc.dc_addr.to_string();
match &dc.rtt_ms {
Some(rtt) => {
// Align: IPv4 addresses are shorter, use more tabs
// 149.154.175.50:443 = ~18 chars
info!(" DC{} [IPv4] {}:\t\t\t\t{:.0} ms", dc.dc_idx, addr_str, rtt);
}
None => {
let err = dc.error.as_deref().unwrap_or("fail");
info!(" DC{} [IPv4] {}:\t\t\t\tFAIL ({})", dc.dc_idx, addr_str, err);
}
}
}
info!("============================================================");
}
info!("================================");
}
// Background tasks
@@ -387,5 +431,4 @@
}
Ok(())
}
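For reference, the IP-version classification printed in the startup summary above reduces to a four-way match on reachability. A self-contained sketch (the helper name and signature are illustrative, not part of this commit):

/// Mirrors the startup-summary branching above.
/// `v6_works` / `v4_works`: at least one DC answered on that IP version.
fn describe_ip_availability(v6_works: bool, v4_works: bool, prefer_ipv6: bool) -> &'static str {
    match (v6_works, v4_works) {
        (true, true) if prefer_ipv6 => "IPv6 in use and IPv4 is fallback",
        (true, true) => "IPv4 in use and IPv6 is fallback",
        (true, false) => "IPv6 only (IPv4 unavailable)",
        (false, true) => "IPv4 only (IPv6 unavailable)",
        (false, false) => "No connectivity!",
    }
}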

View File

@@ -20,7 +20,7 @@
use crate::proxy::handshake::{
handle_tls_handshake, handle_mtproto_handshake,
HandshakeSuccess, generate_tg_nonce, encrypt_tg_nonce,
HandshakeSuccess, generate_tg_nonce, encrypt_tg_nonce_with_ciphers,
};
use crate::proxy::relay::relay_bidirectional;
use crate::proxy::masking::handle_bad_client;
@@ -340,8 +340,8 @@
/// - ME returns responses in RPC_PROXY_ANS envelope
async fn handle_via_middle_proxy<R, W>(
mut client_reader: CryptoReader<R>,
mut client_writer: CryptoWriter<W>,
crypto_reader: CryptoReader<R>,
crypto_writer: CryptoWriter<W>,
success: HandshakeSuccess,
me_pool: Arc<MePool>,
stats: Arc<Stats>,
@@ -352,6 +352,9 @@
R: AsyncRead + Unpin + Send + 'static,
W: AsyncWrite + Unpin + Send + 'static,
{
let mut client_reader = crypto_reader;
let mut client_writer = crypto_writer;
let user = success.user.clone();
let peer = success.peer;
@@ -636,7 +639,7 @@
config.general.fast_mode,
);
let encrypted_nonce = encrypt_tg_nonce(&nonce);
let (encrypted_nonce, mut tg_encryptor, tg_decryptor) = encrypt_tg_nonce_with_ciphers(&nonce);
debug!(
peer = %success.peer,
@@ -649,12 +652,9 @@
let (read_half, write_half) = stream.into_split();
let decryptor = AesCtr::new(&tg_dec_key, tg_dec_iv);
let encryptor = AesCtr::new(&tg_enc_key, tg_enc_iv);
Ok((
CryptoReader::new(read_half, decryptor),
CryptoWriter::new(write_half, encryptor),
CryptoReader::new(read_half, tg_decryptor),
CryptoWriter::new(write_half, tg_encryptor),
))
}
}
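The client.rs change above is the crypto half of this commit: the old code encrypted the 64-byte nonce inside encrypt_tg_nonce, then built fresh AesCtr instances whose counters started at block 0, even though the nonce had already consumed the first four keystream blocks. Reusing the tg_encryptor and tg_decryptor returned by encrypt_tg_nonce_with_ciphers keeps the CTR stream aligned with what Telegram expects; a test sketch of that guarantee follows the handshake hunk below.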

View File

@@ -218,7 +218,6 @@ where
replay_checker.add_handshake(dec_prekey_iv);
let decryptor = AesCtr::new(&dec_key, dec_iv);
let encryptor = AesCtr::new(&enc_key, enc_iv);
let success = HandshakeSuccess {
@@ -297,19 +296,32 @@ pub fn generate_tg_nonce(
}
}
/// Encrypt nonce for sending to Telegram
pub fn encrypt_tg_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
/// Encrypt nonce for sending to Telegram and return cipher objects with correct counter state
pub fn encrypt_tg_nonce_with_ciphers(nonce: &[u8; HANDSHAKE_LEN]) -> (Vec<u8>, AesCtr, AesCtr) {
let enc_key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
let key: [u8; 32] = enc_key_iv[..KEY_LEN].try_into().unwrap();
let iv = u128::from_be_bytes(enc_key_iv[KEY_LEN..].try_into().unwrap());
let dec_key_iv: Vec<u8> = enc_key_iv.iter().rev().copied().collect();
let mut encryptor = AesCtr::new(&key, iv);
let encrypted_full = encryptor.encrypt(nonce);
let enc_key: [u8; 32] = enc_key_iv[..KEY_LEN].try_into().unwrap();
let enc_iv = u128::from_be_bytes(enc_key_iv[KEY_LEN..].try_into().unwrap());
let dec_key: [u8; 32] = dec_key_iv[..KEY_LEN].try_into().unwrap();
let dec_iv = u128::from_be_bytes(dec_key_iv[KEY_LEN..].try_into().unwrap());
let mut encryptor = AesCtr::new(&enc_key, enc_iv);
let encrypted_full = encryptor.encrypt(nonce); // counter: 0 → 4 (64-byte nonce = 4 AES blocks)
let mut result = nonce[..PROTO_TAG_POS].to_vec();
result.extend_from_slice(&encrypted_full[PROTO_TAG_POS..]);
result
let decryptor = AesCtr::new(&dec_key, dec_iv);
(result, encryptor, decryptor)
}
/// Encrypt nonce for sending to Telegram (legacy function for compatibility)
pub fn encrypt_tg_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
let (encrypted, _, _) = encrypt_tg_nonce_with_ciphers(nonce);
encrypted
}
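A hedged test sketch of the counter-state guarantee named above (this assumes AesCtr::encrypt takes &mut self and a byte slice and returns an owned buffer, matching its use in this file; the test is not part of the commit):

#[test]
fn returned_encryptor_continues_past_the_nonce() {
    let nonce = [0x42u8; HANDSHAKE_LEN];
    let (_, mut enc, _) = encrypt_tg_nonce_with_ciphers(&nonce);
    // A fresh cipher over the same key/iv only produces the same keystream
    // after it has consumed the HANDSHAKE_LEN bytes (4 blocks) of the nonce.
    let key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
    let key: [u8; 32] = key_iv[..KEY_LEN].try_into().unwrap();
    let iv = u128::from_be_bytes(key_iv[KEY_LEN..].try_into().unwrap());
    let mut fresh = AesCtr::new(&key, iv);
    let _ = fresh.encrypt(&[0u8; HANDSHAKE_LEN]); // advance counter 0 → 4
    assert_eq!(enc.encrypt(b"payload"), fresh.encrypt(b"payload"));
}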
#[cfg(test)]

View File

@@ -1,4 +1,6 @@
//! Upstream Management with per-DC latency-weighted selection
//!
//! IPv6/IPv4 connectivity checks with configurable preference.
use std::net::{SocketAddr, IpAddr};
use std::sync::Arc;
@@ -18,6 +20,9 @@ use crate::transport::socks::{connect_socks4, connect_socks5};
/// Number of Telegram datacenters
const NUM_DCS: usize = 5;
/// Timeout for individual DC ping attempt
const DC_PING_TIMEOUT_SECS: u64 = 5;
// ============= RTT Tracking =============
#[derive(Debug, Clone, Copy)]
@@ -43,6 +48,29 @@ impl LatencyEma {
}
}
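LatencyEma's body falls outside this diff. For orientation, a minimal sketch of a conventional EMA with the same surface (update/get; constructed as LatencyEma::new(0.3) later in this file) — an assumption about its behavior, not a copy of it:

struct EmaSketch { alpha: f64, value: Option<f64> }

impl EmaSketch {
    fn new(alpha: f64) -> Self { Self { alpha, value: None } }
    fn update(&mut self, sample_ms: f64) {
        // Standard exponential moving average; the first sample seeds the value.
        self.value = Some(match self.value {
            Some(v) => self.alpha * sample_ms + (1.0 - self.alpha) * v,
            None => sample_ms,
        });
    }
    fn get(&self) -> Option<f64> { self.value }
}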
// ============= Per-DC IP Preference Tracking =============
/// Tracks which IP version works for each DC
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IpPreference {
/// Not yet tested
Unknown,
/// IPv6 works
PreferV6,
/// Only IPv4 works (IPv6 failed)
PreferV4,
/// Both work
BothWork,
/// Both failed
Unavailable,
}
impl Default for IpPreference {
fn default() -> Self {
Self::Unknown
}
}
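An optional, behavior-identical simplification: since Rust 1.62 the manual impl Default can be replaced by a derive (a suggestion, not what the commit does):

#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum IpPreference {
    #[default]
    Unknown,
    PreferV6,
    PreferV4,
    BothWork,
    Unavailable,
}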
// ============= Upstream State =============
#[derive(Debug)]
@@ -53,6 +81,8 @@ struct UpstreamState {
last_check: std::time::Instant,
/// Per-DC latency EMA (index 0 = DC1, index 4 = DC5)
dc_latency: [LatencyEma; NUM_DCS],
/// Per-DC IP version preference (learned from connectivity tests)
dc_ip_pref: [IpPreference; NUM_DCS],
}
impl UpstreamState {
@@ -63,16 +93,11 @@ impl UpstreamState {
fails: 0,
last_check: std::time::Instant::now(),
dc_latency: [LatencyEma::new(0.3); NUM_DCS],
dc_ip_pref: [IpPreference::Unknown; NUM_DCS],
}
}
/// Map DC index to latency array slot (0..NUM_DCS).
///
/// Matches the C implementation's `mf_cluster_lookup` behavior:
/// - Standard DCs ±1..±5 → direct mapping to array index 0..4
/// - Unknown DCs (CDN, media, etc.) → default DC slot (index 1 = DC 2)
/// This matches Telegram's `default 2;` in proxy-multi.conf.
/// - There is NO modular arithmetic in the C implementation.
fn dc_array_idx(dc_idx: i16) -> Option<usize> {
let abs_dc = dc_idx.unsigned_abs() as usize;
if abs_dc == 0 {
@@ -82,21 +107,18 @@ impl UpstreamState {
Some(abs_dc - 1)
} else {
// Unknown DC → default cluster (DC 2, index 1)
// Same as C: mf_cluster_lookup returns default_cluster
Some(1)
}
}
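// For orientation (not part of the diff): assuming the branch elided by the
// fold above returns None for abs_dc == 0, the mapping behaves as:
//   dc_array_idx(1)  == Some(0)   // DC 1 → slot 0
//   dc_array_idx(-4) == Some(3)   // negative DC indices map by absolute value
//   dc_array_idx(7)  == Some(1)   // unknown DC → default cluster (DC 2)
//   dc_array_idx(0)  == None      // invalid (assumed)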
/// Get latency for a specific DC, falling back to average across all known DCs
fn effective_latency(&self, dc_idx: Option<i16>) -> Option<f64> {
// Try DC-specific latency first
if let Some(di) = dc_idx.and_then(Self::dc_array_idx) {
if let Some(ms) = self.dc_latency[di].get() {
return Some(ms);
}
}
// Fallback: average of all known DC latencies
let (sum, count) = self.dc_latency.iter()
.filter_map(|l| l.get())
.fold((0.0, 0u32), |(s, c), v| (s + v, c + 1));
@@ -114,11 +136,14 @@ pub struct DcPingResult {
pub error: Option<String>,
}
/// Result of startup ping for one upstream
/// Result of startup ping for one upstream (separate v6/v4 results)
#[derive(Debug, Clone)]
pub struct StartupPingResult {
pub results: Vec<DcPingResult>,
pub v6_results: Vec<DcPingResult>,
pub v4_results: Vec<DcPingResult>,
pub upstream_name: String,
/// True if both IPv6 and IPv4 have at least one working DC
pub both_available: bool,
}
// ============= Upstream Manager =============
@@ -141,15 +166,6 @@ impl UpstreamManager {
}
/// Select upstream using latency-weighted random selection.
///
/// `effective_weight = config_weight × latency_factor`
///
/// where `latency_factor = 1000 / latency_ms` if latency is known,
/// or `1.0` if no latency data is available.
///
/// This means a 50ms upstream gets factor 20, a 200ms upstream gets
/// factor 5 — the faster route is 4× more likely to be chosen
/// (all else being equal).
async fn select_upstream(&self, dc_idx: Option<i16>) -> Option<usize> {
let upstreams = self.upstreams.read().await;
if upstreams.is_empty() {
@@ -163,7 +179,6 @@ impl UpstreamManager {
.collect();
if healthy.is_empty() {
// All unhealthy — pick any
return Some(rand::rng().gen_range(0..upstreams.len()));
}
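The doc comment deleted above spelled out the weighting; as a standalone illustration of those formulas (the helper is illustrative, not in the code):

fn effective_weight(config_weight: f64, latency_ms: Option<f64>) -> f64 {
    // latency_factor = 1000 / latency_ms when latency is known, else 1.0
    let latency_factor = latency_ms.map(|ms| 1000.0 / ms).unwrap_or(1.0);
    config_weight * latency_factor
}

// effective_weight(1.0, Some(50.0))  == 20.0
// effective_weight(1.0, Some(200.0)) == 5.0
//   → the 50 ms route is chosen 4× as often, all else being equal.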
@@ -171,7 +186,6 @@ impl UpstreamManager {
return Some(healthy[0]);
}
// Calculate latency-weighted scores
let weights: Vec<(usize, f64)> = healthy.iter().map(|&i| {
let base = upstreams[i].config.weight as f64;
let latency_factor = upstreams[i].effective_latency(dc_idx)
@@ -207,9 +221,6 @@ impl UpstreamManager {
}
/// Connect to target through a selected upstream.
///
/// `dc_idx` is used for latency-based upstream selection and RTT tracking.
/// Pass `None` if DC index is unknown.
pub async fn connect(&self, target: SocketAddr, dc_idx: Option<i16>) -> Result<TcpStream> {
let idx = self.select_upstream(dc_idx).await
.ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?;
@@ -232,7 +243,6 @@ impl UpstreamManager {
u.healthy = true;
u.fails = 0;
// Store per-DC latency
if let Some(di) = dc_idx.and_then(UpstreamState::dc_array_idx) {
u.dc_latency[di].update(rtt_ms);
}
@@ -336,9 +346,10 @@ impl UpstreamManager {
}
}
// ============= Startup Ping =============
// ============= Startup Ping (test both IPv6 and IPv4) =============
/// Ping all Telegram DCs through all upstreams.
/// Tests BOTH IPv6 and IPv4, returns separate results for each.
pub async fn ping_all_dcs(&self, prefer_ipv6: bool) -> Vec<StartupPingResult> {
let upstreams: Vec<(usize, UpstreamConfig)> = {
let guard = self.upstreams.read().await;
@@ -347,8 +358,6 @@ impl UpstreamManager {
.collect()
};
let datacenters = if prefer_ipv6 { &*TG_DATACENTERS_V6 } else { &*TG_DATACENTERS_V4 };
let mut all_results = Vec::new();
for (upstream_idx, upstream_config) in &upstreams {
@@ -360,50 +369,115 @@ impl UpstreamManager {
UpstreamType::Socks5 { address, .. } => format!("socks5://{}", address),
};
let mut dc_results = Vec::new();
let mut v6_results = Vec::new();
let mut v4_results = Vec::new();
for (dc_zero_idx, dc_ip) in datacenters.iter().enumerate() {
let dc_addr = SocketAddr::new(*dc_ip, TG_DATACENTER_PORT);
// === Ping IPv6 first ===
for dc_zero_idx in 0..NUM_DCS {
let dc_v6 = TG_DATACENTERS_V6[dc_zero_idx];
let addr_v6 = SocketAddr::new(dc_v6, TG_DATACENTER_PORT);
let ping_result = tokio::time::timeout(
Duration::from_secs(5),
self.ping_single_dc(upstream_config, dc_addr)
let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(&upstream_config, addr_v6)
).await;
let result = match ping_result {
let ping_result = match result {
Ok(Ok(rtt_ms)) => {
// Store per-DC latency
let mut guard = self.upstreams.write().await;
if let Some(u) = guard.get_mut(*upstream_idx) {
u.dc_latency[dc_zero_idx].update(rtt_ms);
}
DcPingResult {
dc_idx: dc_zero_idx + 1,
dc_addr,
dc_addr: addr_v6,
rtt_ms: Some(rtt_ms),
error: None,
}
}
Ok(Err(e)) => DcPingResult {
dc_idx: dc_zero_idx + 1,
dc_addr,
dc_addr: addr_v6,
rtt_ms: None,
error: Some(e.to_string()),
},
Err(_) => DcPingResult {
dc_idx: dc_zero_idx + 1,
dc_addr,
dc_addr: addr_v6,
rtt_ms: None,
error: Some("timeout (5s)".to_string()),
error: Some("timeout".to_string()),
},
};
v6_results.push(ping_result);
}
dc_results.push(result);
// === Then ping IPv4 ===
for dc_zero_idx in 0..NUM_DCS {
let dc_v4 = TG_DATACENTERS_V4[dc_zero_idx];
let addr_v4 = SocketAddr::new(dc_v4, TG_DATACENTER_PORT);
let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(&upstream_config, addr_v4)
).await;
let ping_result = match result {
Ok(Ok(rtt_ms)) => {
let mut guard = self.upstreams.write().await;
if let Some(u) = guard.get_mut(*upstream_idx) {
u.dc_latency[dc_zero_idx].update(rtt_ms);
}
DcPingResult {
dc_idx: dc_zero_idx + 1,
dc_addr: addr_v4,
rtt_ms: Some(rtt_ms),
error: None,
}
}
Ok(Err(e)) => DcPingResult {
dc_idx: dc_zero_idx + 1,
dc_addr: addr_v4,
rtt_ms: None,
error: Some(e.to_string()),
},
Err(_) => DcPingResult {
dc_idx: dc_zero_idx + 1,
dc_addr: addr_v4,
rtt_ms: None,
error: Some("timeout".to_string()),
},
};
v4_results.push(ping_result);
}
// Check if both IP versions have at least one working DC
let v6_has_working = v6_results.iter().any(|r| r.rtt_ms.is_some());
let v4_has_working = v4_results.iter().any(|r| r.rtt_ms.is_some());
let both_available = v6_has_working && v4_has_working;
// Update IP preference for each DC
{
let mut guard = self.upstreams.write().await;
if let Some(u) = guard.get_mut(*upstream_idx) {
for dc_zero_idx in 0..NUM_DCS {
let v6_ok = v6_results[dc_zero_idx].rtt_ms.is_some();
let v4_ok = v4_results[dc_zero_idx].rtt_ms.is_some();
u.dc_ip_pref[dc_zero_idx] = match (v6_ok, v4_ok) {
(true, true) => IpPreference::BothWork,
(true, false) => IpPreference::PreferV6,
(false, true) => IpPreference::PreferV4,
(false, false) => IpPreference::Unavailable,
};
}
}
}
all_results.push(StartupPingResult {
results: dc_results,
v6_results,
v4_results,
upstream_name,
both_available,
});
}
@@ -419,19 +493,30 @@ impl UpstreamManager {
// ============= Health Checks =============
/// Background health check: rotates through DCs, 30s interval.
/// Uses preferred IP version based on config.
pub async fn run_health_checks(&self, prefer_ipv6: bool) {
let datacenters = if prefer_ipv6 { &*TG_DATACENTERS_V6 } else { &*TG_DATACENTERS_V4 };
let mut dc_rotation = 0usize;
loop {
tokio::time::sleep(Duration::from_secs(30)).await;
let dc_zero_idx = dc_rotation % datacenters.len();
let dc_zero_idx = dc_rotation % NUM_DCS;
dc_rotation += 1;
let check_target = SocketAddr::new(datacenters[dc_zero_idx], TG_DATACENTER_PORT);
let dc_addr = if prefer_ipv6 {
SocketAddr::new(TG_DATACENTERS_V6[dc_zero_idx], TG_DATACENTER_PORT)
} else {
SocketAddr::new(TG_DATACENTERS_V4[dc_zero_idx], TG_DATACENTER_PORT)
};
let fallback_addr = if prefer_ipv6 {
SocketAddr::new(TG_DATACENTERS_V4[dc_zero_idx], TG_DATACENTER_PORT)
} else {
SocketAddr::new(TG_DATACENTERS_V6[dc_zero_idx], TG_DATACENTER_PORT)
};
let count = self.upstreams.read().await.len();
for i in 0..count {
let config = {
let guard = self.upstreams.read().await;
@@ -441,22 +526,50 @@ impl UpstreamManager {
let start = Instant::now();
let result = tokio::time::timeout(
Duration::from_secs(10),
self.connect_via_upstream(&config, check_target)
self.connect_via_upstream(&config, dc_addr)
).await;
match result {
Ok(Ok(_stream)) => {
let rtt_ms = start.elapsed().as_secs_f64() * 1000.0;
let mut guard = self.upstreams.write().await;
let u = &mut guard[i];
u.dc_latency[dc_zero_idx].update(rtt_ms);
if !u.healthy {
info!(
rtt = format!("{:.0} ms", rtt_ms),
dc = dc_zero_idx + 1,
"Upstream recovered"
);
}
u.healthy = true;
u.fails = 0;
u.last_check = std::time::Instant::now();
}
Ok(Err(_)) | Err(_) => {
// Try fallback
debug!(dc = dc_zero_idx + 1, "Health check failed, trying fallback");
let start2 = Instant::now();
let result2 = tokio::time::timeout(
Duration::from_secs(10),
self.connect_via_upstream(&config, fallback_addr)
).await;
let mut guard = self.upstreams.write().await;
let u = &mut guard[i];
match result {
match result2 {
Ok(Ok(_stream)) => {
let rtt_ms = start.elapsed().as_secs_f64() * 1000.0;
let rtt_ms = start2.elapsed().as_secs_f64() * 1000.0;
u.dc_latency[dc_zero_idx].update(rtt_ms);
if !u.healthy {
info!(
rtt = format!("{:.0}ms", rtt_ms),
rtt = format!("{:.0} ms", rtt_ms),
dc = dc_zero_idx + 1,
"Upstream recovered"
"Upstream recovered (fallback)"
);
}
u.healthy = true;
@@ -465,7 +578,7 @@ impl UpstreamManager {
Ok(Err(e)) => {
u.fails += 1;
debug!(dc = dc_zero_idx + 1, fails = u.fails,
"Health check failed: {}", e);
"Health check failed (both): {}", e);
if u.fails > 3 {
u.healthy = false;
warn!("Upstream unhealthy (fails)");
@@ -474,7 +587,7 @@ impl UpstreamManager {
Err(_) => {
u.fails += 1;
debug!(dc = dc_zero_idx + 1, fails = u.fails,
"Health check timeout");
"Health check timeout (both)");
if u.fails > 3 {
u.healthy = false;
warn!("Upstream unhealthy (timeout)");
@@ -485,4 +598,30 @@ impl UpstreamManager {
}
}
}
}
}
/// Get the learned IP-version preference for a DC (for use by other components)
pub async fn get_dc_ip_preference(&self, dc_idx: i16) -> Option<IpPreference> {
let guard = self.upstreams.read().await;
if guard.is_empty() {
return None;
}
UpstreamState::dc_array_idx(dc_idx)
.map(|idx| guard[0].dc_ip_pref[idx])
}
/// Get the DC address for the configured IP-version preference
pub async fn get_dc_addr(&self, dc_idx: i16, prefer_ipv6: bool) -> Option<SocketAddr> {
let arr_idx = UpstreamState::dc_array_idx(dc_idx)?;
let ip = if prefer_ipv6 {
TG_DATACENTERS_V6[arr_idx]
} else {
TG_DATACENTERS_V4[arr_idx]
};
Some(SocketAddr::new(ip, TG_DATACENTER_PORT))
}
}
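A hypothetical call site for the two new accessors, combining the learned per-DC preference with the configured one (illustrative; not part of this commit):

async fn pick_dc_addr(manager: &UpstreamManager, prefer_ipv6: bool) -> Option<SocketAddr> {
    // Override the configured preference when one IP family is known-dead for DC 2.
    match manager.get_dc_ip_preference(2).await {
        Some(IpPreference::PreferV4) => manager.get_dc_addr(2, false).await,
        Some(IpPreference::PreferV6) => manager.get_dc_addr(2, true).await,
        _ => manager.get_dc_addr(2, prefer_ipv6).await,
    }
}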