aboutsummaryrefslogtreecommitdiff
path: root/src/main.rs
diff options
context:
space:
mode:
authorUMTS at Teleco <crt@teleco.ch>2026-03-08 07:30:34 +0100
committerUMTS at Teleco <crt@teleco.ch>2026-03-08 07:30:34 +0100
commit8623ef0ee74ff48a5ee24ee032f5b549f662f09d (patch)
tree7f11543d05cfe0e7bd5aaca31ff1d4c86a271fd0 /src/main.rs
goofy ah
Diffstat (limited to 'src/main.rs')
-rw-r--r--src/main.rs418
1 files changed, 418 insertions, 0 deletions
diff --git a/src/main.rs b/src/main.rs
new file mode 100644
index 0000000..aa0b993
--- /dev/null
+++ b/src/main.rs
@@ -0,0 +1,418 @@
+mod cli;
+mod config;
+mod lookup;
+mod output;
+mod tlds;
+mod tui;
+mod types;
+
+use clap::Parser;
+use cli::{print_fullhelp, print_help, Args};
+use config::{parse_filter_file, resolve_paths, Config};
+use tlds::{apply_top_tlds, get_tlds_or_default, whois_overrides};
+use types::{DomainResult, ErrorKind};
+
/// One lookup result tagged with where it came from in the user's input,
/// so all results can later be sorted into a stable display order.
#[derive(Debug)]
struct AggregatedResult {
    // index of the originating domain within `args.domains`
    domain_idx: usize,
    // position of the result's TLD within the effective TLD list
    // (usize::MAX-ish sentinels push unmatched TLDs to the end)
    tld_idx: usize,
    result: DomainResult,
}
+
/// CLI entry point.
///
/// High-level flow: parse arguments, handle the help/refresh shortcuts,
/// resolve the `.hoardom` paths and load config, optionally import a filter,
/// then dispatch to TUI mode, autosearch mode, or the plain per-domain CLI
/// lookup loop, finishing with error output plus CSV or table rendering.
#[tokio::main]
async fn main() {
    let args = Args::parse();

    // handle help flags
    if args.help {
        print_help();
        return;
    }
    if args.fullhelp {
        print_fullhelp();
        return;
    }

    // resolve .hoardom directory structure
    let paths = resolve_paths(args.env_path.as_ref());
    let mut config = Config::load_with_backup(&paths.config_file);

    if !paths.can_save {
        eprintln!("Warning: favorites and settings wont be saved (no writable location found)");
    }

    // handle -r refresh cache flag
    if args.refresh_cache {
        if !paths.caching_enabled {
            eprintln!("Caching is disabled (no writable location). Nothing to refresh.");
            return;
        }
        let cache_file = paths.cache_file("rdap_bootstrap.json");
        match lookup::refresh_cache(&cache_file, args.verbose).await {
            Ok(()) => {
                // record the refresh time so future staleness checks start from now
                config.mark_cache_updated();
                if paths.can_save {
                    let _ = config.save(&paths.config_file);
                }
            }
            Err(e) => eprintln!("Error refreshing cache: {}", e),
        }
        return;
    }

    // check if cache is stale and auto update if needed
    let cache_file = if paths.caching_enabled {
        Some(paths.cache_file("rdap_bootstrap.json"))
    } else {
        None
    };

    let force_refresh = if let Some(ref cf) = cache_file {
        let (is_outdated, should_auto) = config.check_cache_status();
        if is_outdated && !should_auto {
            eprintln!("Warning: RDAP cache is outdated. Run `hoardom -r` to refresh.");
        }
        // force refresh if auto update says so, or if cache file doesnt exist yet
        should_auto || !cf.exists()
    } else {
        false
    };

    // import custom filter if given
    if let Some(filter_path) = &args.import_filter {
        match parse_filter_file(filter_path) {
            Ok(filter) => {
                config.import_filter(filter);
                if paths.can_save {
                    let _ = config.save(&paths.config_file);
                }
            }
            Err(e) => {
                // a bad filter file aborts the whole run rather than searching
                // with an unexpected TLD set
                eprintln!("Error importing filter: {}", e);
                return;
            }
        }
    }

    // whois server overrides are baked into Lists.toml ("tld:server" syntax)
    let overrides = whois_overrides();

    // parse noretry config into ErrorKind list
    let noretry: Vec<ErrorKind> = config.settings.noretry.iter()
        .filter_map(|s| ErrorKind::from_config_str(s))
        .collect();

    // TUI mode
    if args.is_tui() {
        if let Err(e) = tui::run_tui(&args, &config, paths.clone(), cache_file.clone(), force_refresh, overrides.clone(), noretry.clone()).await {
            eprintln!("TUI error: {}", e);
        }
        // save cache timestamp after TUI session if we refreshed
        if force_refresh && paths.can_save {
            config.mark_cache_updated();
            let _ = config.save(&paths.config_file);
        }
        return;
    }

    // CLI needs at least one domain unless autosearch was given
    if args.domains.is_empty() {
        if let Some(file_path) = &args.autosearch {
            run_autosearch(&args, file_path, cache_file.as_deref(), force_refresh, overrides, &noretry).await;
            if force_refresh && paths.can_save {
                config.mark_cache_updated();
                let _ = config.save(&paths.config_file);
            }
            return;
        }
        print_help();
        return;
    }

    let base_tlds = build_base_tlds(&args);
    // upper-bound denominator for the progress indicator
    let total_lookups = estimate_total_lookups(&args.domains, &base_tlds);
    let mut completed_lookups = 0usize;
    // only the first lookup batch may trigger a cache refresh
    let mut refresh_remaining = force_refresh;
    let mut aggregated_results = Vec::new();

    for (domain_idx, raw_domain) in args.domains.iter().enumerate() {
        let (search_name, specific_tld) = parse_domain_input(raw_domain);
        let effective_tlds = build_effective_tlds(&base_tlds, specific_tld.as_deref());

        if effective_tlds.is_empty() {
            eprintln!("No TLDs to search. Check your filter settings.");
            return;
        }

        let results = lookup::lookup_all(
            &search_name,
            &effective_tlds,
            args.effective_delay(),
            args.effective_retry(),
            args.verbose,
            cache_file.as_deref(),
            refresh_remaining,
            args.effective_jobs(),
            overrides,
            &noretry,
            |current, _total| {
                // offset by lookups finished for earlier domains; max(1) avoids
                // a zero denominator when no lookups are expected
                output::print_progress(completed_lookups + current, total_lookups.max(1));
            },
        )
        .await;

        refresh_remaining = false;
        completed_lookups += effective_tlds.len();

        for result in results {
            // remember the TLD's position so output preserves the list order;
            // unmatched TLDs sort near (but before) suggestion sentinels
            let tld_idx = effective_tlds
                .iter()
                .position(|tld| *tld == result.tld)
                .unwrap_or(usize::MAX - 1);
            aggregated_results.push(AggregatedResult {
                domain_idx,
                tld_idx,
                result,
            });
        }

        // Suggestions only kick in when directly searching a single full domain
        if args.domains.len() == 1 && args.effective_suggestions() > 0 {
            if let Some(exact_tld) = specific_tld.as_deref() {
                // suggestions are only useful when the requested domain is taken
                let exact_registered = aggregated_results.iter().any(|item| {
                    item.result.name == search_name
                        && item.result.tld == exact_tld
                        && !item.result.is_available()
                });

                if exact_registered {
                    // probe the rest of the base list for available alternatives
                    let suggestion_tlds: Vec<&'static str> = base_tlds
                        .iter()
                        .copied()
                        .filter(|tld| *tld != exact_tld)
                        .collect();

                    if !suggestion_tlds.is_empty() {
                        // silent sub-lookup: no progress callback, no refresh
                        let suggestion_results = lookup::lookup_all(
                            &search_name,
                            &suggestion_tlds,
                            args.effective_delay(),
                            args.effective_retry(),
                            args.verbose,
                            cache_file.as_deref(),
                            false,
                            args.effective_jobs(),
                            overrides,
                            &noretry,
                            |_current, _total| {},
                        )
                        .await;

                        for result in suggestion_results
                            .into_iter()
                            .filter(|result| result.is_available())
                            .take(args.effective_suggestions())
                        {
                            let tld_idx = base_tlds
                                .iter()
                                .position(|tld| *tld == result.tld)
                                .unwrap_or(usize::MAX);
                            aggregated_results.push(AggregatedResult {
                                domain_idx,
                                tld_idx,
                                result,
                            });
                        }
                    }
                }
            }
        }
    }

    let results = sort_aggregated_results(aggregated_results);

    // save cache timestamp if we refreshed
    if force_refresh && paths.can_save {
        config.mark_cache_updated();
        let _ = config.save(&paths.config_file);
    }

    // print errors first
    output::print_errors(&results, args.verbose);

    // CSV output
    if let Some(csv_opt) = &args.csv {
        match csv_opt {
            Some(path) => {
                // write to file
                match output::write_csv_file(&results, path) {
                    Ok(()) => eprintln!("CSV written to {}", path.display()),
                    Err(e) => eprintln!("Error writing CSV: {}", e),
                }
            }
            None => {
                // print to stdout, no logs
                output::print_csv(&results);
            }
        }
        return;
    }

    // table output
    if args.show_all {
        output::print_full_table(&results, args.no_color, args.no_unicode);
    } else {
        output::print_available_table(&results, args.no_color, args.no_unicode);
    }
}
+
+async fn run_autosearch(
+ args: &Args,
+ file_path: &std::path::PathBuf,
+ cache_path: Option<&std::path::Path>,
+ force_refresh: bool,
+ overrides: &tlds::WhoisOverrides,
+ noretry: &[ErrorKind],
+) {
+ let content = match std::fs::read_to_string(file_path) {
+ Ok(c) => c,
+ Err(e) => {
+ eprintln!("Could not read autosearch file: {}", e);
+ return;
+ }
+ };
+
+ let base_tlds = build_base_tlds(args);
+
+ // collect all search entries, grouping by name so "zapplex.de" + "zapplex.nl" become one batch
+ let mut batches: Vec<(String, Vec<String>)> = Vec::new();
+
+ for line in content.lines() {
+ let trimmed = line.trim();
+ if trimmed.is_empty() {
+ continue;
+ }
+ if let Some(first) = trimmed.chars().next() {
+ if !first.is_alphanumeric() {
+ continue;
+ }
+ }
+
+ let (search_name, specific_tld) = if trimmed.contains('.') {
+ let parts: Vec<&str> = trimmed.splitn(2, '.').collect();
+ (parts[0].to_string(), Some(parts[1].to_string()))
+ } else {
+ (trimmed.to_string(), None)
+ };
+
+ let effective_tlds = build_effective_tlds(&base_tlds, specific_tld.as_deref());
+
+ let entry = if let Some(pos) = batches.iter().position(|(name, _)| *name == search_name) {
+ &mut batches[pos].1
+ } else {
+ batches.push((search_name, Vec::new()));
+ &mut batches.last_mut().unwrap().1
+ };
+ for tld in effective_tlds {
+ if !entry.contains(&tld.to_string()) {
+ entry.push(tld.to_string());
+ }
+ }
+ }
+
+ if batches.is_empty() {
+ eprintln!("No valid search terms in file");
+ return;
+ }
+
+ let total_lookups: usize = batches.iter().map(|(_, tlds)| tlds.len()).sum();
+ let mut completed = 0usize;
+ let mut all_results: Vec<DomainResult> = Vec::new();
+ let mut refresh_remaining = force_refresh;
+
+ for (search_name, tlds) in &batches {
+ let tld_refs: Vec<&str> = tlds.iter().map(|s| s.as_str()).collect();
+
+ let results = lookup::lookup_all(
+ search_name,
+ &tld_refs,
+ args.effective_delay(),
+ args.effective_retry(),
+ args.verbose,
+ cache_path,
+ refresh_remaining,
+ args.effective_jobs(),
+ overrides,
+ noretry,
+ |current, _total| {
+ output::print_progress(completed + current, total_lookups.max(1));
+ },
+ )
+ .await;
+
+ refresh_remaining = false;
+ completed += tlds.len();
+ all_results.extend(results);
+ }
+
+ output::print_errors(&all_results, args.verbose);
+
+ if args.show_all {
+ output::print_full_table(&all_results, args.no_color, args.no_unicode);
+ } else {
+ output::print_available_table(&all_results, args.no_color, args.no_unicode);
+ }
+}
+
+fn build_base_tlds(args: &Args) -> Vec<&'static str> {
+ let tld_list = args.effective_list();
+ let mut tld_vec = get_tlds_or_default(&tld_list);
+
+ if let Some(ref only) = args.only_top {
+ tld_vec = only
+ .iter()
+ .filter(|s| !s.is_empty())
+ .map(|s| -> &'static str { Box::leak(s.clone().into_boxed_str()) })
+ .collect();
+ }
+
+ if let Some(ref top) = args.top_tlds {
+ tld_vec = apply_top_tlds(tld_vec, top);
+ }
+
+ tld_vec
+}
+
/// Split raw user input into a search name and an optional TLD part.
///
/// Splits on the *first* dot only, so `"zapplex.de"` → `("zapplex", Some("de"))`,
/// `"zapplex.co.uk"` → `("zapplex", Some("co.uk"))`, and a bare `"zapplex"` →
/// `("zapplex", None)`.
fn parse_domain_input(raw_domain: &str) -> (String, Option<String>) {
    // split_once replaces the old splitn(2, '.') + Vec + indexing: no
    // intermediate allocation and no panic-capable `parts[1]` access
    match raw_domain.split_once('.') {
        Some((name, tld)) => (name.to_string(), Some(tld.to_string())),
        None => (raw_domain.to_string(), None),
    }
}
+
/// TLD set for one search: a user-specified TLD wins, otherwise the base list.
///
/// An explicit TLD is leaked to obtain the `'static` lifetime the lookup
/// layer expects (one tiny leak per explicit domain input — fine for a CLI).
fn build_effective_tlds(base_tlds: &[&'static str], specific_tld: Option<&str>) -> Vec<&'static str> {
    match specific_tld {
        Some(tld) => {
            let leaked: &'static str = Box::leak(tld.to_owned().into_boxed_str());
            vec![leaked]
        }
        None => base_tlds.to_vec(),
    }
}
+
/// Estimate how many lookups the given inputs will perform, for the
/// progress-indicator denominator.
///
/// A domain containing a dot targets exactly one TLD; a bare name fans out
/// across the whole base TLD list.
fn estimate_total_lookups(domains: &[String], base_tlds: &[&'static str]) -> usize {
    let lookups_for = |domain: &String| -> usize {
        if domain.contains('.') {
            1
        } else {
            base_tlds.len()
        }
    };
    domains.iter().map(lookups_for).sum()
}
+
+fn sort_aggregated_results(mut aggregated: Vec<AggregatedResult>) -> Vec<DomainResult> {
+ aggregated.sort_by(|a, b| {
+ a.tld_idx
+ .cmp(&b.tld_idx)
+ .then(a.domain_idx.cmp(&b.domain_idx))
+ });
+ aggregated.into_iter().map(|item| item.result).collect()
+}
+