// kanidmd/main.rs
1#![deny(warnings)]
2#![warn(unused_extern_crates)]
3#![deny(clippy::todo)]
4#![deny(clippy::unimplemented)]
5#![deny(clippy::unwrap_used)]
6#![deny(clippy::expect_used)]
7#![deny(clippy::panic)]
8#![deny(clippy::unreachable)]
9#![deny(clippy::await_holding_lock)]
10#![deny(clippy::needless_pass_by_value)]
11#![deny(clippy::trivially_copy_pass_by_ref)]
12
// Use mimalloc as the global allocator, except when heap-profiling with
// dhat (which must install its own allocator, below) or on illumos.
#[cfg(not(any(feature = "dhat-heap", target_os = "illumos")))]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

// With the dhat-heap feature, route all allocations through dhat so heap
// usage can be profiled (see the Profiler built in main()).
#[cfg(feature = "dhat-heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
20
21use std::fs::{metadata, File};
22// This works on both unix and windows.
23use fs4::fs_std::FileExt;
24use kanidm_proto::messages::ConsoleOutputMode;
25use sketching::otel::TracingPipelineGuard;
26use std::io::Read;
27#[cfg(target_family = "unix")]
28use std::os::unix::fs::MetadataExt;
29use std::path::PathBuf;
30use std::process::ExitCode;
31
32use clap::{Args, Parser, Subcommand};
33use futures::{SinkExt, StreamExt};
34#[cfg(not(target_family = "windows"))] // not needed for windows builds
35use kanidm_utils_users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};
36use kanidmd_core::admin::{
37    AdminTaskRequest, AdminTaskResponse, ClientCodec, ProtoDomainInfo,
38    ProtoDomainUpgradeCheckReport, ProtoDomainUpgradeCheckStatus,
39};
40use kanidmd_core::config::{CliConfig, Configuration, EnvironmentConfig, ServerConfigUntagged};
41use kanidmd_core::{
42    backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core,
43    dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core,
44    dbscan_list_indexes_core, dbscan_list_quarantined_core, dbscan_quarantine_id2entry_core,
45    dbscan_restore_quarantined_core, domain_rename_core, reindex_server_core, restore_server_core,
46    vacuum_server_core, verify_server_core,
47};
48use sketching::tracing_forest::util::*;
49use tokio::net::UnixStream;
50use tokio_util::codec::Framed;
51#[cfg(target_family = "windows")] // for windows builds
52use whoami;
53
54include!("./opt.rs");
55
impl KanidmdOpt {
    /// Return a reference to the `CommonOpt` shared options carried by
    /// whichever subcommand variant was parsed from the CLI.
    ///
    /// Every variant of `KanidmdOpt` (declared in the included `opt.rs`)
    /// embeds the common options somewhere in its payload; this match routes
    /// each variant to that field so callers (e.g. `main`) can read settings
    /// such as the output mode uniformly. The match is exhaustive on purpose
    /// so adding a variant without common options is a compile error.
    fn commonopt(&self) -> &CommonOpt {
        match self {
            // Variants that wrap the common options directly.
            KanidmdOpt::Server(sopt)
            | KanidmdOpt::CertGenerate(sopt)
            | KanidmdOpt::ConfigTest(sopt)
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::ListIndexes(sopt),
            }
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::ListId2Entry(sopt),
            }
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::ListIndexAnalysis(sopt),
            } => sopt,
            // Variants whose payload struct holds the options in a
            // `commonopts` field.
            KanidmdOpt::Database {
                commands: DbCommands::Backup(bopt),
            } => &bopt.commonopts,
            KanidmdOpt::Database {
                commands: DbCommands::Restore(ropt),
            } => &ropt.commonopts,
            // Struct-style variants with a named `commonopts` field.
            KanidmdOpt::DbScan {
                commands: DbScanOpt::QuarantineId2Entry { commonopts, .. },
            }
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::ListQuarantined { commonopts },
            }
            | KanidmdOpt::DbScan {
                commands: DbScanOpt::RestoreQuarantined { commonopts, .. },
            }
            | KanidmdOpt::ShowReplicationCertificate { commonopts }
            | KanidmdOpt::RenewReplicationCertificate { commonopts }
            | KanidmdOpt::RefreshReplicationConsumer { commonopts, .. } => commonopts,
            KanidmdOpt::RecoverAccount { commonopts, .. } => commonopts,
            KanidmdOpt::DbScan {
                commands: DbScanOpt::ListIndex(dopt),
            } => &dopt.commonopts,
            KanidmdOpt::DbScan {
                commands: DbScanOpt::GetId2Entry(dopt),
            } => &dopt.commonopts,
            KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::Show { commonopts },
            }
            | KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::Change { commonopts },
            }
            | KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::UpgradeCheck { commonopts },
            }
            | KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::Raise { commonopts },
            }
            | KanidmdOpt::DomainSettings {
                commands: DomainSettingsCmds::Remigrate { commonopts, .. },
            } => commonopts,
            KanidmdOpt::Database {
                commands: DbCommands::Verify(sopt),
            }
            | KanidmdOpt::Database {
                commands: DbCommands::Reindex(sopt),
            } => sopt,
            KanidmdOpt::Database {
                commands: DbCommands::Vacuum(copt),
            } => copt,
            KanidmdOpt::HealthCheck(hcopt) => &hcopt.commonopts,
            KanidmdOpt::Version(copt) => copt,
        }
    }
}
125
/// Report the current windows username to stderr as a startup diagnostic.
#[cfg(target_family = "windows")]
fn get_user_details_windows() {
    let username = whoami::username();
    eprintln!("Running on windows, current username is: {:?}", username);
}
134
135async fn submit_admin_req(path: &str, req: AdminTaskRequest, output_mode: ConsoleOutputMode) {
136    // Connect to the socket.
137    let stream = match UnixStream::connect(path).await {
138        Ok(s) => s,
139        Err(e) => {
140            error!(err = ?e, %path, "Unable to connect to socket path");
141            let diag = kanidm_lib_file_permissions::diagnose_path(path.as_ref());
142            info!(%diag);
143            return;
144        }
145    };
146
147    let mut reqs = Framed::new(stream, ClientCodec);
148
149    if let Err(e) = reqs.send(req).await {
150        error!(err = ?e, "Unable to send request");
151        return;
152    };
153
154    if let Err(e) = reqs.flush().await {
155        error!(err = ?e, "Unable to flush request");
156        return;
157    }
158
159    trace!("flushed, waiting ...");
160
161    match reqs.next().await {
162        Some(Ok(AdminTaskResponse::RecoverAccount { password })) => match output_mode {
163            ConsoleOutputMode::JSON => {
164                let json_output = serde_json::json!({
165                    "password": password
166                });
167                println!("{}", json_output);
168            }
169            ConsoleOutputMode::Text => {
170                info!(new_password = ?password)
171            }
172        },
173        Some(Ok(AdminTaskResponse::ShowReplicationCertificate { cert })) => match output_mode {
174            ConsoleOutputMode::JSON => {
175                println!("{{\"certificate\":\"{}\"}}", cert)
176            }
177            ConsoleOutputMode::Text => {
178                info!(certificate = ?cert)
179            }
180        },
181
182        Some(Ok(AdminTaskResponse::DomainUpgradeCheck { report })) => {
183            match output_mode {
184                ConsoleOutputMode::JSON => {
185                    let json_output = serde_json::json!({
186                        "domain_upgrade_check": report
187                    });
188                    println!("{}", json_output);
189                }
190                ConsoleOutputMode::Text => {
191                    let ProtoDomainUpgradeCheckReport {
192                        name,
193                        uuid,
194                        current_level,
195                        upgrade_level,
196                        report_items,
197                    } = report;
198
199                    info!("domain_name            : {}", name);
200                    info!("domain_uuid            : {}", uuid);
201                    info!("domain_current_level   : {}", current_level);
202                    info!("domain_upgrade_level   : {}", upgrade_level);
203
204                    for item in report_items {
205                        info!("------------------------");
206                        match item.status {
207                            ProtoDomainUpgradeCheckStatus::Pass6To7Gidnumber => {
208                                info!("upgrade_item           : gidnumber range validity");
209                                debug!("from_level             : {}", item.from_level);
210                                debug!("to_level               : {}", item.to_level);
211                                info!("status                 : PASS");
212                            }
213                            ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber => {
214                                info!("upgrade_item           : gidnumber range validity");
215                                debug!("from_level             : {}", item.from_level);
216                                debug!("to_level               : {}", item.to_level);
217                                info!("status                 : FAIL");
218                                info!("description            : The automatically allocated gidnumbers for posix accounts was found to allocate numbers into systemd-reserved ranges. These can no longer be used.");
219                                info!("action                 : Modify the gidnumber of affected entries so that they are in the range 65536 to 524287 OR reset the gidnumber to cause it to automatically regenerate.");
220                                for entry_id in item.affected_entries {
221                                    info!("affected_entry         : {}", entry_id);
222                                }
223                            }
224                            // ===========
225                            ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys => {
226                                info!("upgrade_item           : security key usage");
227                                debug!("from_level             : {}", item.from_level);
228                                debug!("to_level               : {}", item.to_level);
229                                info!("status                 : PASS");
230                            }
231                            ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys => {
232                                info!("upgrade_item           : security key usage");
233                                debug!("from_level             : {}", item.from_level);
234                                debug!("to_level               : {}", item.to_level);
235                                info!("status                 : FAIL");
236                                info!("description            : Security keys no longer function as a second factor due to the introduction of CTAP2 and greater forcing PIN interactions.");
237                                info!("action                 : Modify the accounts in question to remove their security key and add it as a passkey or enable TOTP");
238                                for entry_id in item.affected_entries {
239                                    info!("affected_entry         : {}", entry_id);
240                                }
241                            }
242                            // ===========
243                            ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri => {
244                                info!("upgrade_item           : oauth2 strict redirect uri enforcement");
245                                debug!("from_level             : {}", item.from_level);
246                                debug!("to_level               : {}", item.to_level);
247                                info!("status                 : PASS");
248                            }
249                            ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri => {
250                                info!("upgrade_item           : oauth2 strict redirect uri enforcement");
251                                debug!("from_level             : {}", item.from_level);
252                                debug!("to_level               : {}", item.to_level);
253                                info!("status                 : FAIL");
254                                info!("description            : To harden against possible public client open redirection vulnerabilities, redirect uris must now be registered ahead of time and are validated rather than the former origin verification process.");
255                                info!("action                 : Verify the redirect uri's for OAuth2 clients and then enable strict-redirect-uri on each client.");
256                                for entry_id in item.affected_entries {
257                                    info!("affected_entry         : {}", entry_id);
258                                }
259                            }
260                        }
261                    }
262                }
263            }
264        }
265
266        Some(Ok(AdminTaskResponse::DomainRaise { level })) => match output_mode {
267            ConsoleOutputMode::JSON => {
268                eprintln!("{{\"success\":\"{}\"}}", level)
269            }
270            ConsoleOutputMode::Text => {
271                info!("success - raised domain level to {}", level)
272            }
273        },
274        Some(Ok(AdminTaskResponse::DomainShow { domain_info })) => match output_mode {
275            ConsoleOutputMode::JSON => {
276                let json_output = serde_json::json!({
277                    "domain_info": domain_info
278                });
279                println!("{}", json_output);
280            }
281            ConsoleOutputMode::Text => {
282                let ProtoDomainInfo {
283                    name,
284                    displayname,
285                    uuid,
286                    level,
287                } = domain_info;
288
289                info!("domain_name   : {}", name);
290                info!("domain_display: {}", displayname);
291                info!("domain_uuid   : {}", uuid);
292                info!("domain_level  : {}", level);
293            }
294        },
295        Some(Ok(AdminTaskResponse::Success)) => match output_mode {
296            ConsoleOutputMode::JSON => {
297                eprintln!("\"success\"")
298            }
299            ConsoleOutputMode::Text => {
300                info!("success")
301            }
302        },
303        Some(Ok(AdminTaskResponse::Error)) => match output_mode {
304            ConsoleOutputMode::JSON => {
305                eprintln!("\"error\"")
306            }
307            ConsoleOutputMode::Text => {
308                info!("Error - you should inspect the logs.")
309            }
310        },
311        Some(Err(err)) => {
312            error!(?err, "Error during admin task operation");
313        }
314        None => {
315            error!("Error making request to admin socket");
316        }
317    }
318}
319
/// Check what we're running as and various filesystem permissions.
///
/// Returns `Err(ExitCode::FAILURE)` only when the real and effective
/// uid/gid differ (a setuid/setgid execution); all other findings - running
/// as root, or insecure configuration-file permissions - are logged as
/// warnings and startup continues.
fn check_file_ownership(opt: &KanidmdParser) -> Result<(), ExitCode> {
    // Get info about who we are.
    #[cfg(target_family = "unix")]
    let (cuid, ceuid) = {
        let cuid = get_current_uid();
        let ceuid = get_effective_uid();
        let cgid = get_current_gid();
        let cegid = get_effective_gid();

        if cuid == 0 || ceuid == 0 || cgid == 0 || cegid == 0 {
            warn!("This is running as uid == 0 (root) which may be a security risk.");
            // eprintln!("ERROR: Refusing to run - this process must not operate as root.");
            // std::process::exit(1);
        }

        // A real/effective mismatch indicates setuid or setgid execution,
        // which we refuse to run under.
        if cuid != ceuid || cgid != cegid {
            error!("{} != {} || {} != {}", cuid, ceuid, cgid, cegid);
            error!("Refusing to run - uid and euid OR gid and egid must be consistent.");
            return Err(ExitCode::FAILURE);
        }
        (cuid, ceuid)
    };

    if let Some(cfg_path) = opt.config_path() {
        // These permission checks are unix-only; cuid/ceuid are only bound
        // under the same cfg above.
        #[cfg(target_family = "unix")]
        {
            // A metadata read failure is logged but non-fatal: the checks
            // below are simply skipped (cfg_meta is None).
            if let Some(cfg_meta) = match metadata(&cfg_path) {
                Ok(m) => Some(m),
                Err(e) => {
                    error!(
                        "Unable to read metadata for configuration file '{}' - {:?}",
                        cfg_path.display(),
                        e
                    );
                    // return ExitCxode::FAILURE;
                    None
                }
            } {
                // Warn if the config file is writable by the running uid ...
                if !kanidm_lib_file_permissions::readonly(&cfg_meta) {
                    warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path"));
                }

                // ... or has any "other" (everyone) mode bits set ...
                if cfg_meta.mode() & 0o007 != 0 {
                    warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path")
                        );
                }

                // ... or is owned by the running uid, since the owner can
                // always chmod it back to writable.
                if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid {
                    warn!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path")
                        );
                }
            }
        }
    }
    Ok(())
}
380
// We have to do this because we can't use tracing until we've started the logging pipeline, and we can't start the logging pipeline until the tokio runtime's doing its thing.
/// Bring up the logging/tracing pipeline, run pre-flight filesystem checks,
/// take the kanidmd exclusive lock when the subcommand will touch the
/// database, and then hand off to `kanidm_main`.
async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
    // if we have a server config and it has an OTEL URL, then we'll start the logging pipeline now.

    // TODO: only send to stderr when we're not in a TTY
    let sub = match sketching::otel::start_logging_pipeline(
        &config.otel_grpc_url,
        config.log_level,
        "kanidmd",
    ) {
        Err(err) => {
            // tracing isn't available yet, so report on stderr directly.
            eprintln!("Error starting logger - {:} - Bailing on startup!", err);
            return ExitCode::FAILURE;
        }
        Ok(val) => val,
    };

    if let Err(err) = tracing::subscriber::set_global_default(sub).map_err(|err| {
        eprintln!("Error starting logger - {:} - Bailing on startup!", err);
        ExitCode::FAILURE
    }) {
        return err;
    };

    // ************************************************
    // HERE'S WHERE YOU CAN START USING THE LOGGER
    // ************************************************

    info!(version = %env!("KANIDM_PKG_VERSION"), "Starting Kanidmd");

    // guard which shuts down the logging/tracing providers when we close out
    let _otelguard = TracingPipelineGuard {};

    // ===========================================================================
    // Start pre-run checks

    // Check the permissions of the files from the configuration.
    if let Err(err) = check_file_ownership(&opt) {
        return err;
    };

    if let Some(db_path) = config.db_path.as_ref() {
        let db_pathbuf = db_path.to_path_buf();
        // We can't check the db_path permissions because it may not exist yet!
        // So we check its parent directory instead.
        if let Some(db_parent_path) = db_pathbuf.parent() {
            if !db_parent_path.exists() {
                warn!(
                    "DB folder {} may not exist, server startup may FAIL!",
                    db_parent_path.to_str().unwrap_or("invalid file path")
                );
                let diag = kanidm_lib_file_permissions::diagnose_path(&db_pathbuf);
                info!(%diag);
            }

            let db_par_path_buf = db_parent_path.to_path_buf();
            let i_meta = match metadata(&db_par_path_buf) {
                Ok(m) => m,
                Err(e) => {
                    error!(
                        "Unable to read metadata for database folder '{}' - {:?}",
                        &db_par_path_buf.to_str().unwrap_or("invalid file path"),
                        e
                    );
                    return ExitCode::FAILURE;
                }
            };
            if !i_meta.is_dir() {
                error!(
                    "ERROR: Refusing to run - DB folder {} may not be a directory",
                    db_par_path_buf.to_str().unwrap_or("invalid file path")
                );
                return ExitCode::FAILURE;
            }

            // A read-only parent would prevent the server creating/updating
            // the db file - warn, but let startup attempt it anyway.
            if kanidm_lib_file_permissions::readonly(&i_meta) {
                warn!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", db_par_path_buf.to_str().unwrap_or("invalid file path"));
            }
            #[cfg(not(target_os = "windows"))]
            if i_meta.mode() & 0o007 != 0 {
                warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path"));
            }
        }
    } else {
        error!("No db_path set in configuration, server startup will FAIL!");
        return ExitCode::FAILURE;
    }

    // Subcommands that never open the database skip the exclusive lock;
    // everything else takes a ".klock" file next to the db (or in the temp
    // dir) so two kanidmd processes can't race on the same database.
    let lock_was_setup = match &opt.commands {
        // we aren't going to touch the DB so we can carry on
        KanidmdOpt::ShowReplicationCertificate { .. }
        | KanidmdOpt::RenewReplicationCertificate { .. }
        | KanidmdOpt::RefreshReplicationConsumer { .. }
        | KanidmdOpt::RecoverAccount { .. }
        | KanidmdOpt::HealthCheck(_) => None,
        _ => {
            // Okay - Lets now create our lock and go.
            #[allow(clippy::expect_used)]
            let klock_path = match config.db_path.clone() {
                Some(val) => val.with_extension("klock"),
                None => std::env::temp_dir().join("kanidmd.klock"),
            };

            let flock = match File::create(&klock_path) {
                Ok(flock) => flock,
                Err(err) => {
                    error!(
                        "ERROR: Refusing to start - unable to create kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!(?err);
                    return ExitCode::FAILURE;
                }
            };

            // fs4's try_lock_exclusive returns Ok(false) when the lock is
            // already held by another process, Err for other failures.
            match flock.try_lock_exclusive() {
                Ok(true) => debug!("Acquired kanidm exclusive lock"),
                Ok(false) => {
                    error!(
                        "ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!("Is another kanidmd process running?");
                    return ExitCode::FAILURE;
                }
                Err(err) => {
                    error!(
                        "ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!(?err);
                    return ExitCode::FAILURE;
                }
            };

            Some(klock_path)
        }
    };

    let result_code = kanidm_main(config, opt).await;

    // Best-effort cleanup of the lock file once the server work is done.
    if let Some(klock_path) = lock_was_setup {
        if let Err(reason) = std::fs::remove_file(&klock_path) {
            warn!(
                ?reason,
                "WARNING: Unable to clean up kanidmd exclusive lock at {}",
                klock_path.display()
            );
        }
    }

    result_code
}
533
/// Process entry point: harden the process, parse the CLI, assemble the
/// server `Configuration` from file / environment / CLI layers, then start
/// the tokio runtime and run `start_daemon` on it.
fn main() -> ExitCode {
    // On linux when debug assertions are disabled, prevent ptrace
    // from attaching to us.
    #[cfg(all(target_os = "linux", not(debug_assertions)))]
    if let Err(code) = prctl::set_dumpable(false) {
        println!(
            "CRITICAL: Unable to set prctl flags, which breaches our security model, quitting! {:?}", code
        );
        return ExitCode::FAILURE;
    }

    // We need enough backtrace depth to find leak sources if they exist.
    #[cfg(feature = "dhat-heap")]
    let _profiler = dhat::Profiler::builder().trim_backtraces(Some(40)).build();

    // Read CLI args, determine what the user has asked us to do.
    let opt = KanidmdParser::parse();

    // print the app version and bail
    if let KanidmdOpt::Version(_) = &opt.commands {
        println!("kanidmd {}", env!("KANIDM_PKG_VERSION"));
        return ExitCode::SUCCESS;
    };

    // KANIDM_SERVER_CONFIG_PATH is baked in at compile time via env!; an
    // empty value means the build itself was misconfigured.
    if env!("KANIDM_SERVER_CONFIG_PATH").is_empty() {
        println!("CRITICAL: Kanidmd was not built correctly and is missing a valid KANIDM_SERVER_CONFIG_PATH value");
        return ExitCode::FAILURE;
    }

    let default_config_path = PathBuf::from(env!("KANIDM_SERVER_CONFIG_PATH"));

    // Resolve the config file: explicit CLI path first, then the built-in
    // default (only if it exists on disk), else environment-only config.
    let maybe_config_path = if let Some(p) = opt.config_path() {
        Some(p)
    } else {
        // The user didn't ask for a file, lets check if the default path exists?
        if default_config_path.exists() {
            // It does, lets use it.
            Some(default_config_path)
        } else {
            // No default config, and no config specified, lets assume the user
            // has selected environment variables.
            None
        }
    };

    // Parse the TOML config file if we have a path; a parse failure is fatal,
    // but the total absence of a file is only a warning.
    let maybe_sconfig = if let Some(config_path) = maybe_config_path {
        match ServerConfigUntagged::new(config_path) {
            Ok(c) => Some(c),
            Err(err) => {
                eprintln!("ERROR: Configuration Parse Failure: {:?}", err);
                return ExitCode::FAILURE;
            }
        }
    } else {
        eprintln!("WARNING: No configuration path was provided, relying on environment variables.");
        None
    };

    let envconfig = match EnvironmentConfig::new() {
        Ok(ec) => ec,
        Err(err) => {
            eprintln!("ERROR: Environment Configuration Parse Failure: {:?}", err);
            return ExitCode::FAILURE;
        }
    };

    // The only CLI-sourced config item is the console output mode.
    let cli_config = CliConfig {
        output_mode: Some(opt.commands.commonopt().output_mode.to_owned().into()),
    };

    let is_server = matches!(&opt.commands, KanidmdOpt::Server(_));

    // Layer the configuration sources into the final Configuration.
    let config = Configuration::build()
        .add_env_config(envconfig)
        .add_opt_toml_config(maybe_sconfig)
        // We always set threads to 1 unless it's the main server.
        .add_cli_config(cli_config)
        .is_server_mode(is_server)
        .finish();

    let Some(config) = config else {
        eprintln!(
            "ERROR: Unable to build server configuration from provided configuration inputs."
        );
        return ExitCode::FAILURE;
    };

    // ===========================================================================
    // Config ready

    // Get information on the windows username
    #[cfg(target_family = "windows")]
    get_user_details_windows();

    // Start the runtime
    let maybe_rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(config.threads)
        .enable_all()
        .thread_name("kanidmd-thread-pool")
        // .thread_stack_size(8 * 1024 * 1024)
        // If we want a hook for thread start.
        // .on_thread_start()
        // In future, we can stop the whole process if a panic occurs.
        // .unhandled_panic(tokio::runtime::UnhandledPanic::ShutdownRuntime)
        .build();

    let rt = match maybe_rt {
        Ok(rt) => rt,
        Err(err) => {
            eprintln!("CRITICAL: Unable to start runtime! {:?}", err);
            return ExitCode::FAILURE;
        }
    };

    // Block the main thread on the daemon; its ExitCode is our exit status.
    rt.block_on(start_daemon(opt, config))
}
650
651/// Build and execute the main server. The ServerConfig are the configuration options
652/// that we are processing into the config for the main server.
653async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
654    match &opt.commands {
655        KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => {
656            let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_));
657            if config_test {
658                info!("Running in server configuration test mode ...");
659            } else {
660                info!("Running in server mode ...");
661            };
662
663            // Verify the TLs configs.
664            if let Some(tls_config) = config.tls_config.as_ref() {
665                {
666                    let i_meta = match metadata(&tls_config.chain) {
667                        Ok(m) => m,
668                        Err(e) => {
669                            error!(
670                                "Unable to read metadata for TLS chain file '{}' - {:?}",
671                                tls_config.chain.display(),
672                                e
673                            );
674                            let diag =
675                                kanidm_lib_file_permissions::diagnose_path(&tls_config.chain);
676                            info!(%diag);
677                            return ExitCode::FAILURE;
678                        }
679                    };
680                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
681                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.chain.display());
682                    }
683                }
684
685                {
686                    let i_meta = match metadata(&tls_config.key) {
687                        Ok(m) => m,
688                        Err(e) => {
689                            error!(
690                                "Unable to read metadata for TLS key file '{}' - {:?}",
691                                tls_config.key.display(),
692                                e
693                            );
694                            let diag = kanidm_lib_file_permissions::diagnose_path(&tls_config.key);
695                            info!(%diag);
696                            return ExitCode::FAILURE;
697                        }
698                    };
699                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
700                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.key.display());
701                    }
702                    #[cfg(not(target_os = "windows"))]
703                    if i_meta.mode() & 0o007 != 0 {
704                        warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", tls_config.key.display());
705                    }
706                }
707
                // If a TLS client CA directory is configured, validate it up front:
                // it must exist and be a directory. We also warn when it is readonly
                // (the server apparently needs write access - see the warning text)
                // or when it is accessible to "everyone".
                if let Some(ca_dir) = tls_config.client_ca.as_ref() {
                    // check that the TLS client CA config option is what we expect
                    let ca_dir_path = PathBuf::from(&ca_dir);
                    if !ca_dir_path.exists() {
                        error!(
                            "TLS CA folder {} does not exist, server startup will FAIL!",
                            ca_dir.display()
                        );
                        // Non-fatal here: emit a diagnosis and keep checking so the
                        // admin sees as many problems as possible in one run.
                        let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                        info!(%diag);
                    }

                    let i_meta = match metadata(&ca_dir_path) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for '{}' - {:?}",
                                ca_dir.display(),
                                e
                            );
                            let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    // A file (rather than a directory) at this path is a hard error.
                    if !i_meta.is_dir() {
                        error!(
                            "ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
                            ca_dir.display()
                        );
                        return ExitCode::FAILURE;
                    }
                    if kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir.display());
                    }
                    // Unix only: warn on world-accessible permission bits.
                    #[cfg(not(target_os = "windows"))]
                    if i_meta.mode() & 0o007 != 0 {
                        warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir.display());
                    }
                }
748            }
749
            // Start the server core (or just validate config when config_test is
            // set - in that case the signal/run loop below is skipped entirely).
            let sctx = create_server_core(config, config_test).await;
            if !config_test {
                // On linux, notify systemd.
                #[cfg(target_os = "linux")]
                {
                    let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
                    let _ = sd_notify::notify(
                        true,
                        &[sd_notify::NotifyState::Status("Started Kanidm 🦀")],
                    );
                };

                match sctx {
                    Ok(mut sctx) => {
                        // Main run loop: block on signals until a shutdown trigger
                        // (ctrl-c, SIGTERM, or an internal broadcast message) breaks out.
                        loop {
                            #[cfg(target_family = "unix")]
                            {
                                // Re-subscribe each iteration so a fresh receiver is
                                // moved into the select! arm below.
                                let mut listener = sctx.subscribe();
                                tokio::select! {
                                    // ctrl-c (SIGINT): clean shutdown.
                                    Ok(()) = tokio::signal::ctrl_c() => {
                                        break
                                    }
                                    // SIGTERM: clean shutdown.
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::terminate();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        break
                                    }
                                    // SIGALRM: deliberately ignored.
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::alarm();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore
                                    }
                                    // SIGHUP: reload the TLS acceptor in place, with the
                                    // systemd Reloading/Ready notification dance around it.
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::hangup();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Reload TLS certificates
                                        // systemd has a special reload handler for this.
                                        #[cfg(target_os = "linux")]
                                        {
                                            if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
                                                let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Reloading, monotonic_usec]);
                                                let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reloading ...")]);
                                            } else {
                                                error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
                                            };
                                        }

                                        sctx.tls_acceptor_reload().await;

                                        // Systemd freaks out if you send the ready state too fast after the
                                        // reload state and can kill Kanidmd as a result.
                                        tokio::time::sleep(std::time::Duration::from_secs(5)).await;

                                        #[cfg(target_os = "linux")]
                                        {
                                            if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
                                                let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready, monotonic_usec]);
                                                let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reload Success")]);
                                            } else {
                                                error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
                                            };
                                        }

                                        info!("Reload complete");
                                    }
                                    // SIGUSR1: deliberately ignored.
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::user_defined1();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore
                                    }
                                    // SIGUSR2: deliberately ignored.
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::user_defined2();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore
                                    }
                                    // we got a message on the broadcast from somewhere else
                                    Ok(msg) = async move {
                                        listener.recv().await
                                    } => {
                                        debug!("Main loop received message: {:?}", msg);
                                        break
                                    }
                                }
                            }
                            // Windows: only ctrl-c is available as a shutdown trigger.
                            #[cfg(target_family = "windows")]
                            {
                                tokio::select! {
                                    Ok(()) = tokio::signal::ctrl_c() => {
                                        break
                                    }
                                }
                            }
                        }
                        info!("Signal received, shutting down");
                        // Send a broadcast that we are done.
                        sctx.shutdown().await;
                    }
                    Err(_) => {
                        error!("Failed to start server core!");
                        // We may need to return an exit code here, but that may take some re-architecting
                        // to ensure we drop everything cleanly.
                        return ExitCode::FAILURE;
                    }
                }
                info!("Stopped 🛑 ");
            }
866        }
        // Generate self-signed / development TLS certificates via the core helper.
        KanidmdOpt::CertGenerate(_sopt) => {
            info!("Running in certificate generate mode ...");
            cert_generate_core(&config);
        }
        // Offline database maintenance: write a backup to the requested path.
        KanidmdOpt::Database {
            commands: DbCommands::Backup(bopt),
        } => {
            info!("Running in backup mode ...");
            backup_server_core(&config, &bopt.path);
        }
        // Offline database maintenance: restore the database from a backup file.
        KanidmdOpt::Database {
            commands: DbCommands::Restore(ropt),
        } => {
            info!("Running in restore mode ...");
            restore_server_core(&config, &ropt.path).await;
        }
        // Offline database maintenance: run integrity verification.
        KanidmdOpt::Database {
            commands: DbCommands::Verify(_vopt),
        } => {
            info!("Running in db verification mode ...");
            verify_server_core(&config).await;
        }
        // Ask the running server (over the admin unix socket) to display its
        // replication certificate.
        KanidmdOpt::ShowReplicationCertificate { commonopts } => {
            info!("Running show replication certificate ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::ShowReplicationCertificate,
                output_mode,
            )
            .await;
        }
        // Ask the running server to renew (re-issue) its replication certificate.
        KanidmdOpt::RenewReplicationCertificate { commonopts } => {
            info!("Running renew replication certificate ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RenewReplicationCertificate,
                output_mode,
            )
            .await;
        }
909        KanidmdOpt::RefreshReplicationConsumer {
910            commonopts,
911            proceed,
912        } => {
913            info!("Running refresh replication consumer ...");
914            if !proceed {
915                error!("Unwilling to proceed. Check --help.");
916            } else {
917                let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
918                submit_admin_req(
919                    config.adminbindpath.as_str(),
920                    AdminTaskRequest::RefreshReplicationConsumer,
921                    output_mode,
922                )
923                .await;
924            }
925        }
        // Ask the running server to reset credentials for the named account.
        KanidmdOpt::RecoverAccount { name, commonopts } => {
            info!("Running account recovery ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RecoverAccount {
                    name: name.to_owned(),
                },
                output_mode,
            )
            .await;
        }
        // Offline database maintenance: rebuild all indexes.
        KanidmdOpt::Database {
            commands: DbCommands::Reindex(_copt),
        } => {
            info!("Running in reindex mode ...");
            reindex_server_core(&config).await;
        }
        // Low-level database inspection commands. Each arm delegates directly to
        // the matching kanidmd_core dbscan_* routine against the local database.
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndexes(_),
        } => {
            info!("👀 db scan - list indexes");
            dbscan_list_indexes_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListId2Entry(_),
        } => {
            info!("👀 db scan - list id2entry");
            dbscan_list_id2entry_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndexAnalysis(_),
        } => {
            info!("👀 db scan - list index analysis");
            dbscan_list_index_analysis_core(&config);
        }
        // Dump the contents of a single named index.
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndex(dopt),
        } => {
            info!("👀 db scan - list index content - {}", dopt.index_name);
            dbscan_list_index_core(&config, dopt.index_name.as_str());
        }
        // Fetch a single entry from id2entry by its numeric id.
        KanidmdOpt::DbScan {
            commands: DbScanOpt::GetId2Entry(dopt),
        } => {
            info!("👀 db scan - get id2 entry - {}", dopt.id);
            dbscan_get_id2entry_core(&config, dopt.id);
        }

        // Quarantine management: move an entry out of id2entry by id.
        KanidmdOpt::DbScan {
            commands: DbScanOpt::QuarantineId2Entry { id, commonopts: _ },
        } => {
            info!("☣️  db scan - quarantine id2 entry - {}", id);
            dbscan_quarantine_id2entry_core(&config, *id);
        }

        // Quarantine management: list currently quarantined entries.
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListQuarantined { commonopts: _ },
        } => {
            info!("☣️  db scan - list quarantined");
            dbscan_list_quarantined_core(&config);
        }

        // Quarantine management: restore a quarantined entry by id.
        KanidmdOpt::DbScan {
            commands: DbScanOpt::RestoreQuarantined { id, commonopts: _ },
        } => {
            info!("☣️  db scan - restore quarantined entry - {}", id);
            dbscan_restore_quarantined_core(&config, *id);
        }
995
        // Offline: rename the domain in the local database. Potentially slow.
        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Change { .. },
        } => {
            info!("Running in domain name change mode ... this may take a long time ...");
            domain_rename_core(&config).await;
        }

        // Online (admin socket): display current domain information.
        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Show { commonopts },
        } => {
            info!("Running domain show ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainShow,
                output_mode,
            )
            .await;
        }

        // Online (admin socket): report whether the domain is ready to upgrade.
        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::UpgradeCheck { commonopts },
        } => {
            info!("Running domain upgrade check ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainUpgradeCheck,
                output_mode,
            )
            .await;
        }

        // Online (admin socket): raise the domain functional level.
        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Raise { commonopts },
        } => {
            info!("Running domain raise ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainRaise,
                output_mode,
            )
            .await;
        }

        // Online (admin socket): re-run migrations from the given level.
        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Remigrate { commonopts, level },
        } => {
            info!("⚠️  Running domain remigrate ...");
            let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
            submit_admin_req(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainRemigrate { level: *level },
                output_mode,
            )
            .await;
        }
1054
        // Offline database maintenance: vacuum/compact the database file.
        KanidmdOpt::Database {
            commands: DbCommands::Vacuum(_copt),
        } => {
            info!("Running in vacuum mode ...");
            vacuum_server_core(&config);
        }
1061        KanidmdOpt::HealthCheck(sopt) => {
1062            debug!("{sopt:?}");
1063
1064            let healthcheck_url = match &sopt.check_origin {
1065                true => format!("{}/status", config.origin),
1066                false => {
1067                    // the replace covers when you specify an ipv6-capable "all" address
1068                    format!(
1069                        "https://{}/status",
1070                        config.address.replace("[::]", "localhost")
1071                    )
1072                }
1073            };
1074
1075            info!("Checking {healthcheck_url}");
1076
1077            let mut client = reqwest::ClientBuilder::new()
1078                .danger_accept_invalid_certs(!sopt.verify_tls)
1079                .danger_accept_invalid_hostnames(!sopt.verify_tls)
1080                .https_only(true);
1081
1082            client = match &config.tls_config {
1083                None => client,
1084                Some(tls_config) => {
1085                    debug!(
1086                        "Trying to load {} to build a CA cert path",
1087                        tls_config.chain.display()
1088                    );
1089                    // if the ca_cert file exists, then we'll use it
1090                    let ca_cert_path = tls_config.chain.clone();
1091                    match ca_cert_path.exists() {
1092                        true => {
1093                            let mut cert_buf = Vec::new();
1094                            if let Err(err) = std::fs::File::open(&ca_cert_path)
1095                                .and_then(|mut file| file.read_to_end(&mut cert_buf))
1096                            {
1097                                error!(
1098                                    "Failed to read {:?} from filesystem: {:?}",
1099                                    ca_cert_path, err
1100                                );
1101                                return ExitCode::FAILURE;
1102                            }
1103
1104                            let ca_chain_parsed =
1105                                match reqwest::Certificate::from_pem_bundle(&cert_buf) {
1106                                    Ok(val) => val,
1107                                    Err(e) => {
1108                                        error!(
1109                                            "Failed to parse {:?} into CA chain!\nError: {:?}",
1110                                            ca_cert_path, e
1111                                        );
1112                                        return ExitCode::FAILURE;
1113                                    }
1114                                };
1115
1116                            // Need at least 2 certs for the leaf + chain. We skip the leaf.
1117                            for cert in ca_chain_parsed.into_iter().skip(1) {
1118                                client = client.add_root_certificate(cert)
1119                            }
1120                            client
1121                        }
1122                        false => {
1123                            warn!(
1124                                "Couldn't find ca cert {} but carrying on...",
1125                                tls_config.chain.display()
1126                            );
1127                            client
1128                        }
1129                    }
1130                }
1131            };
1132            #[allow(clippy::unwrap_used)]
1133            let client = client.build().unwrap();
1134
1135            let req = match client.get(&healthcheck_url).send().await {
1136                Ok(val) => val,
1137                Err(error) => {
1138                    let error_message = {
1139                        if error.is_timeout() {
1140                            format!("Timeout connecting to url={healthcheck_url}")
1141                        } else if error.is_connect() {
1142                            format!("Connection failed: {}", error)
1143                        } else {
1144                            format!("Failed to complete healthcheck: {:?}", error)
1145                        }
1146                    };
1147                    error!("CRITICAL: {error_message}");
1148                    return ExitCode::FAILURE;
1149                }
1150            };
1151            debug!("Request: {req:?}");
1152            let output_mode: ConsoleOutputMode = sopt.commonopts.output_mode.to_owned().into();
1153            match output_mode {
1154                ConsoleOutputMode::JSON => {
1155                    println!("{{\"result\":\"OK\"}}")
1156                }
1157                ConsoleOutputMode::Text => {
1158                    info!("OK")
1159                }
1160            }
1161        }
1162        KanidmdOpt::Version(_) => {}
1163    }
1164    ExitCode::SUCCESS
1165}