kanidmd/main.rs
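
//! kanidmd entry point: parses CLI arguments, assembles configuration from
//! environment, file, and CLI layers, performs pre-flight permission checks,
//! and launches the server core or the requested administrative task.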

#![deny(warnings)]
#![warn(unused_extern_crates)]
#![deny(clippy::todo)]
#![deny(clippy::unimplemented)]
#![deny(clippy::unwrap_used)]
#![deny(clippy::expect_used)]
#![deny(clippy::panic)]
#![deny(clippy::unreachable)]
#![deny(clippy::await_holding_lock)]
#![deny(clippy::needless_pass_by_value)]
#![deny(clippy::trivially_copy_pass_by_ref)]

#[cfg(all(not(feature = "dhat-heap"), target_os = "linux"))]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

#[cfg(feature = "dhat-heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
use std::fs::{metadata, File};
// This works on both unix and windows.
use fs4::fs_std::FileExt;
use kanidm_proto::messages::ConsoleOutputMode;
use sketching::otel::TracingPipelineGuard;
use std::io::Read;
#[cfg(target_family = "unix")]
use std::os::unix::fs::MetadataExt;
use std::path::PathBuf;
use std::process::ExitCode;

use clap::{Args, Parser, Subcommand};
use futures::{SinkExt, StreamExt};
#[cfg(not(target_family = "windows"))] // not needed for windows builds
use kanidm_utils_users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};
use kanidmd_core::admin::{
    AdminTaskRequest, AdminTaskResponse, ClientCodec, ProtoDomainInfo,
    ProtoDomainUpgradeCheckReport, ProtoDomainUpgradeCheckStatus,
};
use kanidmd_core::config::{CliConfig, Configuration, EnvironmentConfig, ServerConfigUntagged};
use kanidmd_core::{
    backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core,
    dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core,
    dbscan_list_indexes_core, dbscan_list_quarantined_core, dbscan_quarantine_id2entry_core,
    dbscan_restore_quarantined_core, domain_rename_core, reindex_server_core, restore_server_core,
    vacuum_server_core, verify_server_core,
};
use sketching::tracing_forest::util::*;
use tokio::net::UnixStream;
use tokio_util::codec::Framed;
#[cfg(target_family = "windows")] // for windows builds
use whoami;

include!("./opt.rs");

impl KanidmdOpt {
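    /// Return the `CommonOpt` carried by whichever subcommand was invoked, so
    /// that shared settings (such as the output mode) can be read uniformly.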
    fn commonopt(&self) -> &CommonOpt {
        match self {
            KanidmdOpt::Server(sopt)
            | KanidmdOpt::CertGenerate(sopt)
            | KanidmdOpt::ConfigTest(sopt)
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::ListIndexes(sopt),
            }
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::ListId2Entry(sopt),
            }
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::ListIndexAnalysis(sopt),
            } => sopt,
            KanidmdOpt::Database {
                commands: DbCommands::Backup(bopt),
            } => &bopt.commonopts,
            KanidmdOpt::Database {
                commands: DbCommands::Restore(ropt),
            } => &ropt.commonopts,
            KanidmdOpt::DbScan {
                commands: DbScanOpt::QuarantineId2Entry { commonopts, .. },
            }
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::ListQuarantined { commonopts },
            }
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::RestoreQuarantined { commonopts, .. },
            }
            | KanidmdOpt::ShowReplicationCertificate { commonopts }
            | KanidmdOpt::RenewReplicationCertificate { commonopts }
            | KanidmdOpt::RefreshReplicationConsumer { commonopts, .. } => commonopts,
            KanidmdOpt::RecoverAccount { commonopts, .. } => commonopts,
            KanidmdOpt::DisableAccount { commonopts, .. } => commonopts,
            KanidmdOpt::DbScan {
                commands: DbScanOpt::ListIndex(dopt),
            } => &dopt.commonopts,
            KanidmdOpt::DbScan {
                commands: DbScanOpt::GetId2Entry(dopt),
            } => &dopt.commonopts,
            KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::Show { commonopts },
            }
            | KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::Change { commonopts },
            }
            | KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::UpgradeCheck { commonopts },
            }
            | KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::Raise { commonopts },
            }
            | KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::Remigrate { commonopts, .. },
            } => commonopts,
            KanidmdOpt::Database {
                commands: DbCommands::Verify(sopt),
            }
            | KanidmdOpt::Database {
                commands: DbCommands::Reindex(sopt),
            } => sopt,
            KanidmdOpt::Database {
                commands: DbCommands::Vacuum(copt),
            } => copt,
            KanidmdOpt::HealthCheck(hcopt) => &hcopt.commonopts,
            KanidmdOpt::Version(copt) => copt,
        }
    }
}

/// Get information on the windows username
#[cfg(target_family = "windows")]
fn get_user_details_windows() {
    eprintln!(
        "Running on windows, current username is: {:?}",
        whoami::username()
    );
}

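/// Connect to the kanidmd admin unix socket at `path`, submit a single
/// `AdminTaskRequest`, and render the `AdminTaskResponse` according to
/// `output_mode`. A minimal usage sketch (the socket path shown here is an
/// assumption for illustration, not a guaranteed default):
///
/// ```ignore
/// submit_admin_req(
///     "/var/run/kanidmd/sock", // hypothetical path - use config.adminbindpath
///     AdminTaskRequest::DomainShow,
///     ConsoleOutputMode::Text,
/// )
/// .await;
/// ```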
async fn submit_admin_req(path: &str, req: AdminTaskRequest, output_mode: ConsoleOutputMode) {
    // Connect to the socket.
    let stream = match UnixStream::connect(path).await {
        Ok(s) => s,
        Err(e) => {
            error!(err = ?e, %path, "Unable to connect to socket path");
            let diag = kanidm_lib_file_permissions::diagnose_path(path.as_ref());
            info!(%diag);
            return;
        }
    };

    let mut reqs = Framed::new(stream, ClientCodec);

    if let Err(e) = reqs.send(req).await {
        error!(err = ?e, "Unable to send request");
        return;
    };

    if let Err(e) = reqs.flush().await {
        error!(err = ?e, "Unable to flush request");
        return;
    }

    trace!("flushed, waiting ...");

    match reqs.next().await {
        Some(Ok(AdminTaskResponse::RecoverAccount { password })) => match output_mode {
            ConsoleOutputMode::JSON => {
                let json_output = serde_json::json!({
                    "password": password
                });
                println!("{json_output}");
            }
            ConsoleOutputMode::Text => {
                info!(new_password = ?password)
            }
        },
        Some(Ok(AdminTaskResponse::ShowReplicationCertificate { cert })) => match output_mode {
            ConsoleOutputMode::JSON => {
                println!("{{\"certificate\":\"{cert}\"}}")
            }
            ConsoleOutputMode::Text => {
                info!(certificate = ?cert)
            }
        },

        Some(Ok(AdminTaskResponse::DomainUpgradeCheck { report })) => {
            match output_mode {
                ConsoleOutputMode::JSON => {
                    let json_output = serde_json::json!({
                        "domain_upgrade_check": report
                    });
                    println!("{json_output}");
                }
                ConsoleOutputMode::Text => {
                    let ProtoDomainUpgradeCheckReport {
                        name,
                        uuid,
                        current_level,
                        upgrade_level,
                        report_items,
                    } = report;

                    info!("domain_name            : {}", name);
                    info!("domain_uuid            : {}", uuid);
                    info!("domain_current_level   : {}", current_level);
                    info!("domain_upgrade_level   : {}", upgrade_level);

                    for item in report_items {
                        info!("------------------------");
                        match item.status {
                            ProtoDomainUpgradeCheckStatus::Pass6To7Gidnumber => {
                                info!("upgrade_item           : gidnumber range validity");
                                debug!("from_level             : {}", item.from_level);
                                debug!("to_level               : {}", item.to_level);
                                info!("status                 : PASS");
                            }
                            ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber => {
                                info!("upgrade_item           : gidnumber range validity");
                                debug!("from_level             : {}", item.from_level);
                                debug!("to_level               : {}", item.to_level);
                                info!("status                 : FAIL");
                                info!("description            : The automatically allocated gidnumbers for posix accounts were found to fall into systemd-reserved ranges. These can no longer be used.");
                                info!("action                 : Modify the gidnumber of affected entries so that they are in the range 65536 to 524287, OR reset the gidnumber to cause it to be automatically regenerated.");
                                for entry_id in item.affected_entries {
                                    info!("affected_entry         : {}", entry_id);
                                }
                            }
                            // ===========
                            ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys => {
                                info!("upgrade_item           : security key usage");
                                debug!("from_level             : {}", item.from_level);
                                debug!("to_level               : {}", item.to_level);
                                info!("status                 : PASS");
                            }
                            ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys => {
                                info!("upgrade_item           : security key usage");
                                debug!("from_level             : {}", item.from_level);
                                debug!("to_level               : {}", item.to_level);
                                info!("status                 : FAIL");
                                info!("description            : Security keys no longer function as a second factor because CTAP2 and later standards force PIN interactions.");
                                info!("action                 : Modify the accounts in question to remove their security key and re-add it as a passkey, or enable TOTP.");
                                for entry_id in item.affected_entries {
                                    info!("affected_entry         : {}", entry_id);
                                }
                            }
                            // ===========
                            ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri => {
                                info!("upgrade_item           : oauth2 strict redirect uri enforcement");
                                debug!("from_level             : {}", item.from_level);
                                debug!("to_level               : {}", item.to_level);
                                info!("status                 : PASS");
                            }
                            ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri => {
                                info!("upgrade_item           : oauth2 strict redirect uri enforcement");
                                debug!("from_level             : {}", item.from_level);
                                debug!("to_level               : {}", item.to_level);
                                info!("status                 : FAIL");
                                info!("description            : To harden against possible open-redirection vulnerabilities in public clients, redirect uris must now be registered ahead of time and are strictly validated, replacing the former origin verification process.");
                                info!("action                 : Verify the redirect uris for OAuth2 clients and then enable strict-redirect-uri on each client.");
                                for entry_id in item.affected_entries {
                                    info!("affected_entry         : {}", entry_id);
                                }
                            }
                        }
                    }
                }
            }
        }

        Some(Ok(AdminTaskResponse::DomainRaise { level })) => match output_mode {
            ConsoleOutputMode::JSON => {
                eprintln!("{{\"success\":\"{level}\"}}")
            }
            ConsoleOutputMode::Text => {
                info!("success - raised domain level to {}", level)
            }
        },
        Some(Ok(AdminTaskResponse::DomainShow { domain_info })) => match output_mode {
            ConsoleOutputMode::JSON => {
                let json_output = serde_json::json!({
                    "domain_info": domain_info
                });
                println!("{json_output}");
            }
            ConsoleOutputMode::Text => {
                let ProtoDomainInfo {
                    name,
                    displayname,
                    uuid,
                    level,
                } = domain_info;

                info!("domain_name   : {}", name);
                info!("domain_display: {}", displayname);
                info!("domain_uuid   : {}", uuid);
                info!("domain_level  : {}", level);
            }
        },
        Some(Ok(AdminTaskResponse::Success)) => match output_mode {
            ConsoleOutputMode::JSON => {
                eprintln!("\"success\"")
            }
            ConsoleOutputMode::Text => {
                info!("success")
            }
        },
        Some(Ok(AdminTaskResponse::Error)) => match output_mode {
            ConsoleOutputMode::JSON => {
                eprintln!("\"error\"")
            }
            ConsoleOutputMode::Text => {
                info!("Error - you should inspect the logs.")
            }
        },
        Some(Err(err)) => {
            error!(?err, "Error during admin task operation");
        }
        None => {
            error!("Error making request to admin socket");
        }
    }
}

/// Check what we're running as and various filesystem permissions.
fn check_file_ownership(opt: &KanidmdParser) -> Result<(), ExitCode> {
    // Get info about who we are.
    #[cfg(target_family = "unix")]
    let (cuid, ceuid) = {
        let cuid = get_current_uid();
        let ceuid = get_effective_uid();
        let cgid = get_current_gid();
        let cegid = get_effective_gid();

        if cuid == 0 || ceuid == 0 || cgid == 0 || cegid == 0 {
            warn!("This is running as uid == 0 (root) which may be a security risk.");
            // eprintln!("ERROR: Refusing to run - this process must not operate as root.");
            // std::process::exit(1);
        }

        if cuid != ceuid || cgid != cegid {
            error!("{} != {} || {} != {}", cuid, ceuid, cgid, cegid);
            error!("Refusing to run - uid and euid OR gid and egid must be consistent.");
            return Err(ExitCode::FAILURE);
        }
        (cuid, ceuid)
    };

    if let Some(cfg_path) = opt.config_path() {
        #[cfg(target_family = "unix")]
        {
            if let Some(cfg_meta) = match metadata(&cfg_path) {
                Ok(m) => Some(m),
                Err(e) => {
                    error!(
                        "Unable to read metadata for configuration file '{}' - {:?}",
                        cfg_path.display(),
                        e
                    );
                    // return ExitCode::FAILURE;
                    None
                }
            } {
                if !kanidm_lib_file_permissions::readonly(&cfg_meta) {
                    warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path"));
                }

                if cfg_meta.mode() & 0o007 != 0 {
                    warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path")
                        );
                }

                if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid {
                    warn!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path")
                        );
                }
            }
        }
    }
    Ok(())
}

// We have to do this because we can't use tracing until we've started the logging
// pipeline, and we can't start the logging pipeline until the tokio runtime is running.
async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
    // If we have a server config and it has an OTEL URL, then we start the logging pipeline now.

    // TODO: only send to stderr when we're not in a TTY
    let sub = match sketching::otel::start_logging_pipeline(
        &config.otel_grpc_url,
        config.log_level,
        "kanidmd",
    ) {
        Err(err) => {
            eprintln!("Error starting logger - {err:} - Bailing on startup!");
            return ExitCode::FAILURE;
        }
        Ok(val) => val,
    };

    if let Err(err) = tracing::subscriber::set_global_default(sub).map_err(|err| {
        eprintln!("Error starting logger - {err:} - Bailing on startup!");
        ExitCode::FAILURE
    }) {
        return err;
    };

    // ************************************************
    // HERE'S WHERE YOU CAN START USING THE LOGGER
    // ************************************************

    info!(version = %env!("KANIDM_PKG_VERSION"), "Starting Kanidmd");

    // guard which shuts down the logging/tracing providers when we close out
    let _otelguard = TracingPipelineGuard {};

    // ===========================================================================
    // Start pre-run checks

    // Check the permissions of the files from the configuration.
    if let Err(err) = check_file_ownership(&opt) {
        return err;
    };

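    // Verify the database's parent directory before the server tries to open
    // or create the DB - a missing or badly-permissioned directory is a common
    // cause of startup failure.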
    if let Some(db_path) = config.db_path.as_ref() {
        let db_pathbuf = db_path.to_path_buf();
        // We can't check the db_path permissions because it may not exist yet!
        if let Some(db_parent_path) = db_pathbuf.parent() {
            if !db_parent_path.exists() {
                warn!(
                    "DB folder {} may not exist, server startup may FAIL!",
                    db_parent_path.to_str().unwrap_or("invalid file path")
                );
                let diag = kanidm_lib_file_permissions::diagnose_path(&db_pathbuf);
                info!(%diag);
            }

            let db_par_path_buf = db_parent_path.to_path_buf();
            let i_meta = match metadata(&db_par_path_buf) {
                Ok(m) => m,
                Err(e) => {
                    error!(
                        "Unable to read metadata for database folder '{}' - {:?}",
                        &db_par_path_buf.to_str().unwrap_or("invalid file path"),
                        e
                    );
                    return ExitCode::FAILURE;
                }
            };
            if !i_meta.is_dir() {
                error!(
                    "ERROR: Refusing to run - DB folder {} may not be a directory",
                    db_par_path_buf.to_str().unwrap_or("invalid file path")
                );
                return ExitCode::FAILURE;
            }

            if kanidm_lib_file_permissions::readonly(&i_meta) {
                warn!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", db_par_path_buf.to_str().unwrap_or("invalid file path"));
            }
            #[cfg(not(target_os = "windows"))]
            if i_meta.mode() & 0o007 != 0 {
                warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path"));
            }
        }
    } else {
        error!("No db_path set in configuration, server startup will FAIL!");
        return ExitCode::FAILURE;
    }

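    // Take an exclusive advisory file lock alongside the DB so two kanidmd
    // processes can never operate on the same database at once. Commands that
    // only talk to the admin socket (or just probe health) skip the lock.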
    let lock_was_setup = match &opt.commands {
        // we aren't going to touch the DB so we can carry on
        KanidmdOpt::ShowReplicationCertificate { .. }
        | KanidmdOpt::RenewReplicationCertificate { .. }
        | KanidmdOpt::RefreshReplicationConsumer { .. }
        | KanidmdOpt::RecoverAccount { .. }
        | KanidmdOpt::DisableAccount { .. }
        | KanidmdOpt::HealthCheck(_) => None,
        _ => {
            // Okay - Let's now create our lock and go.
            #[allow(clippy::expect_used)]
            let klock_path = match config.db_path.clone() {
                Some(val) => val.with_extension("klock"),
                None => std::env::temp_dir().join("kanidmd.klock"),
            };

            let flock = match File::create(&klock_path) {
                Ok(flock) => flock,
                Err(err) => {
                    error!(
                        "ERROR: Refusing to start - unable to create kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!(?err);
                    return ExitCode::FAILURE;
                }
            };

            match flock.try_lock_exclusive() {
                Ok(true) => debug!("Acquired kanidm exclusive lock"),
                Ok(false) => {
                    error!(
                        "ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!("Is another kanidmd process running?");
                    return ExitCode::FAILURE;
                }
                Err(err) => {
                    error!(
                        "ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!(?err);
                    return ExitCode::FAILURE;
                }
            };

            Some(klock_path)
        }
    };

    let result_code = kanidm_main(config, opt).await;

    if let Some(klock_path) = lock_was_setup {
        if let Err(reason) = std::fs::remove_file(&klock_path) {
            warn!(
                ?reason,
                "WARNING: Unable to clean up kanidmd exclusive lock at {}",
                klock_path.display()
            );
        }
    }

    result_code
}

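/// Process entry point: hardens the process, parses CLI arguments, layers
/// configuration (environment, optional TOML file, CLI), then spins up the
/// tokio runtime and hands control to `start_daemon`.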
fn main() -> ExitCode {
    // On linux when debug assertions are disabled, prevent ptrace
    // from attaching to us.
    #[cfg(all(target_os = "linux", not(debug_assertions)))]
    if let Err(code) = prctl::set_dumpable(false) {
        println!(
            "CRITICAL: Unable to set prctl flags, which breaches our security model, quitting! {:?}", code
        );
        return ExitCode::FAILURE;
    }

    // We need enough backtrace depth to find leak sources if they exist.
    #[cfg(feature = "dhat-heap")]
    let _profiler = dhat::Profiler::builder().trim_backtraces(Some(40)).build();

    // Read CLI args, determine what the user has asked us to do.
    let opt = KanidmdParser::parse();

    // print the app version and bail
    if let KanidmdOpt::Version(_) = &opt.commands {
        println!("kanidmd {}", env!("KANIDM_PKG_VERSION"));
        return ExitCode::SUCCESS;
    };

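    // KANIDM_SERVER_CONFIG_PATH is baked into the binary at compile time via
    // env!(); an empty value indicates a broken build or packaging step.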
    if env!("KANIDM_SERVER_CONFIG_PATH").is_empty() {
        println!("CRITICAL: Kanidmd was not built correctly and is missing a valid KANIDM_SERVER_CONFIG_PATH value");
        return ExitCode::FAILURE;
    }

    let default_config_path = PathBuf::from(env!("KANIDM_SERVER_CONFIG_PATH"));

    let maybe_config_path = if let Some(p) = opt.config_path() {
        Some(p)
    } else {
        // The user didn't ask for a file, so let's check whether the default path exists.
        if default_config_path.exists() {
            // It does, so let's use it.
            Some(default_config_path)
        } else {
            // No default config, and no config specified, so let's assume the user
            // has selected environment variables.
            None
        }
    };

    let maybe_sconfig = if let Some(config_path) = maybe_config_path {
        match ServerConfigUntagged::new(config_path) {
            Ok(c) => Some(c),
            Err(err) => {
                eprintln!("ERROR: Configuration Parse Failure: {err:?}");
                return ExitCode::FAILURE;
            }
        }
    } else {
        eprintln!("WARNING: No configuration path was provided, relying on environment variables.");
        None
    };

    let envconfig = match EnvironmentConfig::new() {
        Ok(ec) => ec,
        Err(err) => {
            eprintln!("ERROR: Environment Configuration Parse Failure: {err:?}");
            return ExitCode::FAILURE;
        }
    };

    let cli_config = CliConfig {
        output_mode: Some(opt.commands.commonopt().output_mode.to_owned().into()),
    };

    let is_server = matches!(&opt.commands, KanidmdOpt::Server(_));

    let config = Configuration::build()
        .add_env_config(envconfig)
        .add_opt_toml_config(maybe_sconfig)
        .add_cli_config(cli_config)
        // We always set threads to 1 unless it's the main server.
        .is_server_mode(is_server)
        .finish();

    let Some(config) = config else {
        eprintln!(
            "ERROR: Unable to build server configuration from provided configuration inputs."
        );
        return ExitCode::FAILURE;
    };

    // ===========================================================================
    // Config ready

    // Get information on the windows username
    #[cfg(target_family = "windows")]
    get_user_details_windows();

    // Start the runtime
    let maybe_rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(config.threads)
        .enable_all()
        .thread_name("kanidmd-thread-pool")
        // .thread_stack_size(8 * 1024 * 1024)
        // If we want a hook for thread start.
        // .on_thread_start()
        // In future, we can stop the whole process if a panic occurs.
        // .unhandled_panic(tokio::runtime::UnhandledPanic::ShutdownRuntime)
        .build();

    let rt = match maybe_rt {
        Ok(rt) => rt,
        Err(err) => {
            eprintln!("CRITICAL: Unable to start runtime! {err:?}");
            return ExitCode::FAILURE;
        }
    };

    rt.block_on(start_daemon(opt, config))
}

/// Build and execute the main server. The `Configuration` holds the options
/// that we process into the runtime config for the main server.
async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
    match &opt.commands {
        KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => {
            let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_));
            if config_test {
                info!("Running in server configuration test mode ...");
            } else {
                info!("Running in server mode ...");
            };

            // Verify the TLS configs.
            if let Some(tls_config) = config.tls_config.as_ref() {
                {
                    let i_meta = match metadata(&tls_config.chain) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for TLS chain file '{}' - {:?}",
                                tls_config.chain.display(),
                                e
                            );
                            let diag =
                                kanidm_lib_file_permissions::diagnose_path(&tls_config.chain);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.chain.display());
                    }
                }

                {
                    let i_meta = match metadata(&tls_config.key) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for TLS key file '{}' - {:?}",
                                tls_config.key.display(),
                                e
                            );
                            let diag = kanidm_lib_file_permissions::diagnose_path(&tls_config.key);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.key.display());
                    }
                    #[cfg(not(target_os = "windows"))]
                    if i_meta.mode() & 0o007 != 0 {
                        warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", tls_config.key.display());
                    }
                }

                if let Some(ca_dir) = tls_config.client_ca.as_ref() {
                    // check that the TLS client CA config option is what we expect
                    let ca_dir_path = PathBuf::from(&ca_dir);
                    if !ca_dir_path.exists() {
                        error!(
                            "TLS CA folder {} does not exist, server startup will FAIL!",
                            ca_dir.display()
                        );
                        let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                        info!(%diag);
                    }

                    let i_meta = match metadata(&ca_dir_path) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for '{}' - {:?}",
                                ca_dir.display(),
                                e
                            );
                            let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !i_meta.is_dir() {
                        error!(
                            "ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
                            ca_dir.display()
                        );
                        return ExitCode::FAILURE;
                    }
                    if kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir.display());
                    }
                    #[cfg(not(target_os = "windows"))]
                    if i_meta.mode() & 0o007 != 0 {
                        warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir.display());
                    }
                }
            }

            let sctx = create_server_core(config, config_test).await;
            if !config_test {
                // On linux, notify systemd.
                #[cfg(target_os = "linux")]
                {
                    let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
                    let _ = sd_notify::notify(
                        true,
                        &[sd_notify::NotifyState::Status("Started Kanidm 🦀")],
                    );
                };

                match sctx {
                    Ok(mut sctx) => {
                        loop {
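                            // Block until either a shutdown signal arrives
                            // (ctrl-c / SIGTERM / an internal broadcast) or a
                            // SIGHUP requests a TLS certificate reload.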
                            #[cfg(target_family = "unix")]
                            {
                                let mut listener = sctx.subscribe();
                                tokio::select! {
                                    Ok(()) = tokio::signal::ctrl_c() => {
                                        break
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::terminate();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        break
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::alarm();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::hangup();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Reload TLS certificates
                                        // systemd has a special reload handler for this.
                                        #[cfg(target_os = "linux")]
                                        {
                                            if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
                                                let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Reloading, monotonic_usec]);
                                                let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reloading ...")]);
                                            } else {
                                                error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
                                            };
                                        }

                                        sctx.tls_acceptor_reload().await;

                                        // Systemd freaks out if you send the ready state too fast after the
                                        // reload state and can kill Kanidmd as a result.
                                        tokio::time::sleep(std::time::Duration::from_secs(5)).await;

                                        #[cfg(target_os = "linux")]
                                        {
                                            if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
                                                let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready, monotonic_usec]);
                                                let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reload Success")]);
                                            } else {
                                                error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
                                            };
                                        }

                                        info!("Reload complete");
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::user_defined1();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::user_defined2();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore
                                    }
                                    // we got a message on the broadcast from somewhere else
                                    Ok(msg) = async move {
                                        listener.recv().await
                                    } => {
                                        debug!("Main loop received message: {:?}", msg);
                                        break
                                    }
                                }
                            }
                            #[cfg(target_family = "windows")]
                            {
                                tokio::select! {
                                    Ok(()) = tokio::signal::ctrl_c() => {
                                        break
                                    }
                                }
                            }
                        }
                        info!("Signal received, shutting down");
                        // Send a broadcast that we are done.
                        sctx.shutdown().await;
                    }
                    Err(_) => {
                        error!("Failed to start server core!");
                        // We may need to return an exit code here, but that may take some re-architecting
                        // to ensure we drop everything cleanly.
                        return ExitCode::FAILURE;
                    }
                }
                info!("Stopped 🛑 ");
            }
        }
        KanidmdOpt::CertGenerate(_sopt) => {
            info!("Running in certificate generate mode ...");
            cert_generate_core(&config);
        }
        KanidmdOpt::Database {
            commands: DbCommands::Backup(bopt),
        } => {
            info!("Running in backup mode ...");
            backup_server_core(&config, &bopt.path);
        }
        KanidmdOpt::Database {
            commands: DbCommands::Restore(ropt),
        } => {
            info!("Running in restore mode ...");
            restore_server_core(&config, &ropt.path).await;
        }
        KanidmdOpt::Database {
            commands: DbCommands::Verify(_vopt),
        } => {
            info!("Running in db verification mode ...");
            verify_server_core(&config).await;
        }
        KanidmdOpt::ShowReplicationCertificate { commonopts } => {
            info!("Running show replication certificate ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::ShowReplicationCertificate,
                output_mode,
            )
            .await;
        }
        KanidmdOpt::RenewReplicationCertificate { commonopts } => {
            info!("Running renew replication certificate ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RenewReplicationCertificate,
                output_mode,
            )
            .await;
        }
        KanidmdOpt::RefreshReplicationConsumer {
            commonopts,
            proceed,
        } => {
            info!("Running refresh replication consumer ...");
            if !proceed {
                error!("Unwilling to proceed. Check --help.");
            } else {
                let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
                submit_admin_req(
                    config.adminbindpath.as_str(),
                    AdminTaskRequest::RefreshReplicationConsumer,
                    output_mode,
                )
                .await;
            }
        }
        KanidmdOpt::RecoverAccount { name, commonopts } => {
            info!("Running account recovery ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RecoverAccount {
                    name: name.to_owned(),
                },
                output_mode,
            )
            .await;
        }
        KanidmdOpt::DisableAccount { name, commonopts } => {
            info!("Running account disable ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DisableAccount {
                    name: name.to_owned(),
                },
                output_mode,
            )
            .await;
        }
        KanidmdOpt::Database {
            commands: DbCommands::Reindex(_copt),
        } => {
            info!("Running in reindex mode ...");
            reindex_server_core(&config).await;
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndexes(_),
        } => {
            info!("👀 db scan - list indexes");
            dbscan_list_indexes_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListId2Entry(_),
        } => {
            info!("👀 db scan - list id2entry");
            dbscan_list_id2entry_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndexAnalysis(_),
        } => {
            info!("👀 db scan - list index analysis");
            dbscan_list_index_analysis_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndex(dopt),
        } => {
            info!("👀 db scan - list index content - {}", dopt.index_name);
            dbscan_list_index_core(&config, dopt.index_name.as_str());
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::GetId2Entry(dopt),
        } => {
            info!("👀 db scan - get id2 entry - {}", dopt.id);
            dbscan_get_id2entry_core(&config, dopt.id);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::QuarantineId2Entry { id, commonopts: _ },
        } => {
            info!("☣️  db scan - quarantine id2 entry - {}", id);
            dbscan_quarantine_id2entry_core(&config, *id);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListQuarantined { commonopts: _ },
        } => {
            info!("☣️  db scan - list quarantined");
            dbscan_list_quarantined_core(&config);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::RestoreQuarantined { id, commonopts: _ },
        } => {
            info!("☣️  db scan - restore quarantined entry - {}", id);
            dbscan_restore_quarantined_core(&config, *id);
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Change { .. },
        } => {
            info!("Running in domain name change mode ... this may take a long time ...");
            domain_rename_core(&config).await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Show { commonopts },
        } => {
            info!("Running domain show ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainShow,
                output_mode,
            )
            .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::UpgradeCheck { commonopts },
        } => {
            info!("Running domain upgrade check ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainUpgradeCheck,
                output_mode,
            )
            .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Raise { commonopts },
        } => {
            info!("Running domain raise ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainRaise,
                output_mode,
            )
            .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Remigrate { commonopts, level },
        } => {
            info!("⚠️  Running domain remigrate ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainRemigrate { level: *level },
                output_mode,
            )
            .await;
        }

        KanidmdOpt::Database {
            commands: DbCommands::Vacuum(_copt),
        } => {
            info!("Running in vacuum mode ...");
            vacuum_server_core(&config);
        }
        KanidmdOpt::HealthCheck(sopt) => {
            debug!("{sopt:?}");

            let healthcheck_url = match &sopt.check_origin {
                true => format!("{}/status", config.origin),
                false => {
                    // the replace covers when you specify an ipv6-capable "all" address
                    format!(
                        "https://{}/status",
                        config.address.replace("[::]", "localhost")
                    )
                }
            };

            info!("Checking {healthcheck_url}");

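            // Build an HTTPS-only client; TLS verification can be disabled
            // with the CLI flag for self-signed development certificates.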
            let mut client = reqwest::ClientBuilder::new()
                .danger_accept_invalid_certs(!sopt.verify_tls)
                .danger_accept_invalid_hostnames(!sopt.verify_tls)
                .https_only(true);

            client = match &config.tls_config {
                None => client,
                Some(tls_config) => {
                    debug!(
                        "Trying to load {} to build a CA cert path",
                        tls_config.chain.display()
                    );
                    // if the ca_cert file exists, then we'll use it
                    let ca_cert_path = tls_config.chain.clone();
                    match ca_cert_path.exists() {
                        true => {
                            let mut cert_buf = Vec::new();
                            if let Err(err) = std::fs::File::open(&ca_cert_path)
                                .and_then(|mut file| file.read_to_end(&mut cert_buf))
                            {
                                error!(
                                    "Failed to read {:?} from filesystem: {:?}",
                                    ca_cert_path, err
                                );
                                return ExitCode::FAILURE;
                            }

                            let ca_chain_parsed =
                                match reqwest::Certificate::from_pem_bundle(&cert_buf) {
                                    Ok(val) => val,
                                    Err(e) => {
                                        error!(
                                            "Failed to parse {:?} into CA chain!\nError: {:?}",
                                            ca_cert_path, e
                                        );
                                        return ExitCode::FAILURE;
                                    }
                                };

                            // Need at least 2 certs for the leaf + chain. We skip the leaf.
                            for cert in ca_chain_parsed.into_iter().skip(1) {
                                client = client.add_root_certificate(cert)
                            }
                            client
                        }
                        false => {
                            warn!(
                                "Couldn't find ca cert {} but carrying on...",
                                tls_config.chain.display()
                            );
                            client
                        }
                    }
                }
            };
            #[allow(clippy::unwrap_used)]
            let client = client.build().unwrap();

            let req = match client.get(&healthcheck_url).send().await {
                Ok(val) => val,
                Err(error) => {
                    let error_message = {
                        if error.is_timeout() {
                            format!("Timeout connecting to url={healthcheck_url}")
                        } else if error.is_connect() {
                            format!("Connection failed: {error}")
                        } else {
                            format!("Failed to complete healthcheck: {error:?}")
                        }
                    };
                    error!("CRITICAL: {error_message}");
                    return ExitCode::FAILURE;
                }
            };
            debug!("Request: {req:?}");
            let output_mode: ConsoleOutputMode = sopt.commonopts.output_mode.to_owned().into();
            match output_mode {
                ConsoleOutputMode::JSON => {
                    println!("{{\"result\":\"OK\"}}")
                }
                ConsoleOutputMode::Text => {
                    info!("OK")
                }
            }
        }
        KanidmdOpt::Version(_) => {}
    }
    ExitCode::SUCCESS
}