diff --git a/Cargo.toml b/Cargo.toml index 8e33c0d..cece3e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,9 +29,6 @@ openssl = "0.10.52" base64 = "0.21.2" hostname = "0.3.1" bitflags = { version = "2.2.1", features = ["serde"] } -atomic = "0.5.3" -bigdecimal = "0.3.1" -num-bigint = "0.4.3" lazy_static = "1.4.0" poem = { version = "1.3.55", optional = true } sqlx = { git = "https://github.com/zert3x/sqlx", branch="feature/skip", features = ["mysql", "sqlite", "json", "chrono", "ipnetwork", "runtime-tokio-native-tls", "any"], optional = true } diff --git a/src/types/config/mod.rs b/src/types/config/mod.rs index 521d303..4a11c71 100644 --- a/src/types/config/mod.rs +++ b/src/types/config/mod.rs @@ -89,15 +89,15 @@ fn generate_pairs(obj: &Value, key: &str) -> Vec { fn pairs_to_config(pairs: Vec) -> ConfigValue { let mut value = Value::Object(Map::new()); - for p in pairs { - let keys: Vec<&str> = p.key.split('_').collect(); + for pair in pairs { + let keys: Vec<&str> = pair.key.split('_').collect(); let mut path = vec![]; for (i, &key) in keys.iter().enumerate() { path.push(key); if i == keys.len() - 1 { - insert_into(&mut value, &path, p.value.clone().unwrap_or(Value::Null)); + insert_into(&mut value, &path, pair.value.clone().unwrap_or(Value::Null)); } else if keys[i + 1].parse::().is_ok() { if !path_exists(&value, &path) { insert_into(&mut value, &path, Value::Array(Vec::new())); @@ -182,6 +182,7 @@ mod test { let pairs = generate_pairs(&v, ""); let cfg = pairs_to_config(pairs); + assert_eq!(cfg, c) } } diff --git a/src/types/utils/mod.rs b/src/types/utils/mod.rs index 7fd199f..1b1b3b6 100644 --- a/src/types/utils/mod.rs +++ b/src/types/utils/mod.rs @@ -1,6 +1,6 @@ pub use regexes::*; pub use rights::Rights; -pub use snowflake::{DeconstructedSnowflake, Snowflake}; +pub use snowflake::Snowflake; pub mod jwt; mod regexes; diff --git a/src/types/utils/snowflake.rs b/src/types/utils/snowflake.rs index 7c756fa..8502275 100644 --- a/src/types/utils/snowflake.rs +++ 
b/src/types/utils/snowflake.rs @@ -1,22 +1,41 @@ -use std::fmt::Display; +use std::{ + fmt::Display, + sync::atomic::{AtomicUsize, Ordering}, +}; -use atomic::Atomic; -use bigdecimal::{Num, ToPrimitive, Zero}; -use num_bigint::{BigInt, ToBigInt}; -use serde::{Deserialize, Serialize}; +use chrono::{DateTime, TimeZone, Utc}; #[cfg(feature = "sqlx")] use sqlx::Type; +/// 2015-01-01 const EPOCH: i64 = 1420070400000; -static WORKER_ID: u128 = 0; -static PROCESS_ID: u128 = 1; -lazy_static::lazy_static! { - static ref INCREMENT: Atomic = Atomic::default(); -} -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] + +/// Unique identifier including a timestamp. +/// See <https://discord.com/developers/docs/reference#snowflakes> +#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[cfg_attr(feature = "sqlx", derive(Type))] #[cfg_attr(feature = "sqlx", sqlx(transparent))] -pub struct Snowflake(String); +pub struct Snowflake(u64); + +impl Snowflake { + pub fn generate() -> Self { + const WORKER_ID: u64 = 0; + const PROCESS_ID: u64 = 1; + static INCREMENT: AtomicUsize = AtomicUsize::new(0); + + let time = (Utc::now().naive_utc().timestamp_millis() - EPOCH) << 22; + let worker = WORKER_ID << 17; + let process = PROCESS_ID << 12; + let increment = INCREMENT.fetch_add(1, Ordering::Relaxed) as u64 % 4096; + + Self(time as u64 | worker | process | increment) + } + + pub fn timestamp(self) -> DateTime { + Utc.timestamp_millis_opt((self.0 >> 22) as i64 + EPOCH) + .unwrap() + } +} impl Default for Snowflake { fn default() -> Self { @@ -30,131 +49,59 @@ impl Display for Snowflake { } } -impl Snowflake { - pub fn to_binary(&self) -> String { - let self_len = self.0.len(); - let high = self.0[..self_len - 10].parse::().unwrap_or(0); - let low = self.0[self_len - 10..].parse::().unwrap(); - let mut low = low; - let mut high = high; - let mut bin = Vec::with_capacity(64); - - while low > 0 || high > 0 { - bin.push((low & 1) as u8); - low >>= 1; - - if high > 0 { - low += 5_000_000_000 * 
(high % 2); - high >>= 1; - } - } - - bin.iter() - .rev() - .map(|b| char::from_digit(*b as u32, 10).unwrap()) - .collect() - } - - pub fn from_binary(num: &str) -> String { - let mut num = BigInt::from_str_radix(num, 2).unwrap(); - let mut dec = Vec::with_capacity(18); - - let ten = 10.to_bigint().unwrap(); - let _two = 2.to_bigint().unwrap(); - let _thirty_two = 32.to_bigint().unwrap(); - - while num.bits() > 50 { - let high: BigInt = &num >> 32; - let low: BigInt = (high.clone() % &ten) << 32 | &num & BigInt::from((1u64 << 32) - 1); - - let next: BigInt = low.clone() % &ten; - dec.push(next.to_u8().unwrap()); - num = (high / &ten) << 32 | (low / &ten); - } - - while !num.is_zero() { - dec.push((num.clone() % &ten).to_u8().unwrap()); - num /= &ten; - } - - dec.iter() - .rev() - .map(|d| char::from_digit(*d as u32, 10).unwrap()) - .collect() - } - - pub fn generate_worker_process() -> u128 { - let time = (chrono::Utc::now().naive_utc().timestamp_millis() - EPOCH) << 22; - let worker = WORKER_ID << 17; - let process = PROCESS_ID << 12; - let increment = INCREMENT.load(atomic::Ordering::Relaxed); - - INCREMENT.store(increment + 1, atomic::Ordering::Relaxed); - - time as u128 | worker | process | increment - } - - pub fn generate() -> Self { - Self(Self::generate_worker_process().to_string()) - } - - pub fn deconstruct(&self) -> DeconstructedSnowflake { - let binary = format!("{:0>64}", self.to_binary()); - - let ts = i64::from_str_radix(&binary[0..42], 2).unwrap() + EPOCH; - let wid = u64::from_str_radix(&binary[42..47], 2).unwrap(); - let pid = u64::from_str_radix(&binary[47..52], 2).unwrap(); - let increment = BigInt::from_str_radix(&binary[52..64], 2).unwrap(); - - DeconstructedSnowflake { - timestamp: ts, - worker_id: wid, - process_id: pid, - increment, - binary, - } +impl serde::Serialize for Snowflake { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.0.to_string()) } } -#[derive(Debug, Clone, 
PartialEq, Eq)] -pub struct DeconstructedSnowflake { - pub timestamp: i64, - pub worker_id: u64, - pub process_id: u64, - pub increment: BigInt, - pub binary: String, +impl<'de> serde::Deserialize<'de> for Snowflake { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct SnowflakeVisitor; + impl<'de> serde::de::Visitor<'de> for SnowflakeVisitor { + type Value = Snowflake; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("snowflake string") + } + + fn visit_str(self, value: &str) -> Result + where + E: serde::de::Error, + { + match value.parse() { + Ok(value) => Ok(Snowflake(value)), + Err(_) => Err(serde::de::Error::custom("snowflake must be a string containing a u64")), + } + } + } + deserializer.deserialize_str(SnowflakeVisitor) + } } #[cfg(test)] mod test { + use chrono::{DateTime, Utc}; + use super::Snowflake; #[test] - fn test_new_snowflake() { - let snow = Snowflake::generate(); - println!("{snow}"); + fn generate() { + let snow_1 = Snowflake::generate(); + let snow_2 = Snowflake::generate(); + assert!(snow_1.0 < snow_2.0) } #[test] - fn snowflake_to_binary() { - let snowflake = super::Snowflake("1104339392517902336".to_string()); - - let bin = snowflake.to_binary(); - println!("{bin}"); - } - - #[test] - fn binary_to_snowflake() { - let snowflake = super::Snowflake::from_binary( - "111101010011011001101101001110010010100000000001000000000000", - ); - println!("{snowflake}"); - } - - #[test] - fn test_deconstruct() { - let new = super::Snowflake::generate(); - - println!("{:?}", new.deconstruct()); + fn timestamp() { + let snow: Snowflake = serde_json::from_str("\"175928847299117063\"").unwrap(); + let timestamp = "2016-04-30 11:18:25.796Z".parse::>().unwrap(); + assert_eq!(snow.timestamp(), timestamp); } }