1#![deny(warnings)]
2#![warn(unused_extern_crates)]
3#![deny(clippy::todo)]
4#![deny(clippy::unimplemented)]
5#![deny(clippy::unwrap_used)]
6#![deny(clippy::expect_used)]
7#![deny(clippy::panic)]
8#![deny(clippy::unreachable)]
9#![deny(clippy::await_holding_lock)]
10#![deny(clippy::needless_pass_by_value)]
11#![deny(clippy::trivially_copy_pass_by_ref)]
12
// Use mimalloc as the global allocator on linux builds, unless the dhat heap
// profiler is enabled (dhat must own the global allocator to observe
// allocations - see the ALLOC static below).
#[cfg(all(not(feature = "dhat-heap"), target_os = "linux"))]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
16
// When heap profiling is enabled, route every allocation through dhat so the
// profiler can record it.
#[cfg(feature = "dhat-heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
20
21#[cfg(target_family = "unix")]
22use std::os::unix::fs::MetadataExt;
23
24#[cfg(target_family = "unix")]
25use kanidm_utils_users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};
26
27#[cfg(target_family = "windows")] use whoami;
29
30use std::fs::{metadata, File};
31use clap::{Args, Parser, Subcommand};
33use fs4::fs_std::FileExt;
34use futures::{SinkExt, StreamExt};
35use kanidmd_core::admin::{
36 AdminTaskRequest, AdminTaskResponse, ClientCodec, ProtoDomainInfo,
37 ProtoDomainUpgradeCheckReport, ProtoDomainUpgradeCheckStatus,
38};
39use kanidmd_core::config::{Configuration, ServerConfigUntagged};
40use kanidmd_core::{
41 backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core,
42 dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core,
43 dbscan_list_indexes_core, dbscan_list_quarantined_core, dbscan_quarantine_id2entry_core,
44 dbscan_restore_quarantined_core, domain_rename_core, reindex_server_core, restore_server_core,
45 vacuum_server_core, verify_server_core, CoreAction,
46};
47use serde::Serialize;
48use sketching::pipeline::TracingPipelineGuard;
49use sketching::tracing_forest::util::*;
50use std::fmt;
51use std::io::Read;
52use std::path::PathBuf;
53use std::process::ExitCode;
54use tokio::net::UnixStream;
55use tokio_util::codec::Framed;
56
57include!("./opt.rs");
58
/// Report the current username to stderr for operator diagnostics on
/// Windows. (Unix builds report uid/gid details in `check_file_ownership`.)
#[cfg(target_family = "windows")]
fn get_user_details_windows() {
    let username = whoami::username();
    eprintln!(
        "Running on windows, current username is: {:?}",
        username
    );
}
67
68fn display_json_success() {
69 let json_output = serde_json::json!({
70 "status": "ok",
71 });
72 println!("{json_output}");
73}
74
75fn display_json_success_output<T: Serialize>(data: T) {
76 let json_output = serde_json::json!({
77 "status": "ok",
78 "output": data,
79 });
80 println!("{json_output}");
81}
82
83fn display_json_error<E, M>(error: E, message: M)
84where
85 E: fmt::Display,
86 M: fmt::Display,
87{
88 let json_output = serde_json::json!({
89 "status": "error",
90 "reason": format!("{error}"),
91 "message": format!("{message}")
92 });
93 println!("{json_output}");
94}
95
96fn display_json_error_context<E, M, C>(error: E, message: M, context: C)
97where
98 E: fmt::Display,
99 M: fmt::Display,
100 C: fmt::Display,
101{
102 let json_output = serde_json::json!({
103 "status": "error",
104 "reason": format!("{error}"),
105 "message": format!("{message}"),
106 "context": format!("{context}"),
107 });
108 println!("{json_output}");
109}
110
111async fn submit_admin_req_json(path: &str, req: AdminTaskRequest) -> ExitCode {
112 let stream = match UnixStream::connect(path).await {
114 Ok(s) => s,
115 Err(err) => {
116 display_json_error(err, "Unable to connect to socket path.");
117
118 return ExitCode::FAILURE;
119 }
120 };
121
122 let mut reqs = Framed::new(stream, ClientCodec);
123
124 if let Err(err) = reqs.send(req).await {
125 display_json_error(err, "Unable to connect to send request.");
126
127 return ExitCode::FAILURE;
128 };
129
130 if let Err(err) = reqs.flush().await {
131 display_json_error(err, "Unable to connect to flush request.");
132
133 return ExitCode::FAILURE;
134 }
135
136 match reqs.next().await {
137 Some(Ok(AdminTaskResponse::RecoverAccount { password })) => {
138 display_json_success_output(password)
139 }
140 Some(Ok(AdminTaskResponse::Success)) => {
141 display_json_success();
142 }
143 Some(Ok(AdminTaskResponse::Error)) => {
144 display_json_error(
145 "ResponseError",
146 "Error processing request - you should inspect the server logs.",
147 );
148 return ExitCode::FAILURE;
149 }
150 Some(Err(err)) => {
151 display_json_error(err, "Error during admin task operation.");
152 return ExitCode::FAILURE;
153 }
154 None => {
155 display_json_error("SocketClosed", "Error makeing request to admin socket.");
156 return ExitCode::FAILURE;
157 }
158
159 _ => {}
160 }
161
162 ExitCode::SUCCESS
163}
164
/// Submit an admin task request over the kanidmd unix admin socket and
/// report the outcome through the tracing logger for a human operator
/// (human readable twin of `submit_admin_req_json`).
///
/// Returns `ExitCode::FAILURE` on connect/send/flush errors or when the
/// server reports an error, `ExitCode::SUCCESS` otherwise.
async fn submit_admin_req_human(path: &str, req: AdminTaskRequest) -> ExitCode {
    let stream = match UnixStream::connect(path).await {
        Ok(s) => s,
        Err(e) => {
            error!(err = ?e, %path, "Unable to connect to socket path");
            // Emit a path/permissions diagnosis so the operator can see
            // why the admin socket was unreachable.
            let diag = kanidm_lib_file_permissions::diagnose_path(path.as_ref());
            info!(%diag);
            return ExitCode::FAILURE;
        }
    };

    let mut reqs = Framed::new(stream, ClientCodec);

    if let Err(e) = reqs.send(req).await {
        error!(err = ?e, "Unable to send request");
        return ExitCode::FAILURE;
    };

    if let Err(e) = reqs.flush().await {
        error!(err = ?e, "Unable to flush request");
        return ExitCode::FAILURE;
    }

    trace!("flushed, waiting ...");

    // Exactly one response frame is expected per request.
    match reqs.next().await {
        Some(Ok(AdminTaskResponse::RecoverAccount { password })) => info!(new_password = ?password),
        Some(Ok(AdminTaskResponse::ShowReplicationCertificate { cert })) => {
            info!(certificate = ?cert)
        }
        Some(Ok(AdminTaskResponse::ShowReplicationCertificateMetadata {
            not_before,
            not_after,
            subject,
            expired,
        })) => {
            info!("not_before : {}", not_before);
            info!("not_after : {}", not_after);
            info!("subject : {}", subject);
            info!("expired : {}", expired);
        }
        // Render the upgrade-check report: a summary header, then one
        // PASS/FAIL section per report item.
        Some(Ok(AdminTaskResponse::DomainUpgradeCheck { report })) => {
            let ProtoDomainUpgradeCheckReport {
                name,
                uuid,
                current_level,
                upgrade_level,
                report_items,
            } = report;

            info!("domain_name : {}", name);
            info!("domain_uuid : {}", uuid);
            info!("domain_current_level : {}", current_level);
            info!("domain_upgrade_level : {}", upgrade_level);

            // An empty report means every check passed.
            if report_items.is_empty() {
                info!("------------------------");
                info!("status : PASS");
                return ExitCode::SUCCESS;
            }

            for item in report_items {
                info!("------------------------");
                match item.status {
                    ProtoDomainUpgradeCheckStatus::Pass6To7Gidnumber => {
                        info!("upgrade_item : gidnumber range validity");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : PASS");
                    }
                    ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber => {
                        info!("upgrade_item : gidnumber range validity");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : FAIL");
                        info!("description : The automatically allocated gidnumbers for posix accounts was found to allocate numbers into systemd-reserved ranges. These can no longer be used.");
                        info!("action : Modify the gidnumber of affected entries so that they are in the range 65536 to 524287 OR reset the gidnumber to cause it to automatically regenerate.");
                        for entry_id in item.affected_entries {
                            info!("affected_entry : {}", entry_id);
                        }
                    }
                    ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys => {
                        info!("upgrade_item : security key usage");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : PASS");
                    }
                    ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys => {
                        info!("upgrade_item : security key usage");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : FAIL");
                        info!("description : Security keys no longer function as a second factor due to the introduction of CTAP2 and greater forcing PIN interactions.");
                        info!("action : Modify the accounts in question to remove their security key and add it as a passkey or enable TOTP");
                        for entry_id in item.affected_entries {
                            info!("affected_entry : {}", entry_id);
                        }
                    }
                    ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri => {
                        info!("upgrade_item : oauth2 strict redirect uri enforcement");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : PASS");
                    }
                    ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri => {
                        info!("upgrade_item : oauth2 strict redirect uri enforcement");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : FAIL");
                        info!("description : To harden against possible public client open redirection vulnerabilities, redirect uris must now be registered ahead of time and are validated rather than the former origin verification process.");
                        info!("action : Verify the redirect uri's for OAuth2 clients and then enable strict-redirect-uri on each client.");
                        for entry_id in item.affected_entries {
                            info!("affected_entry : {}", entry_id);
                        }
                    }
                }
            }
        }
        Some(Ok(AdminTaskResponse::DomainRaise { level })) => {
            info!("success - raised domain level to {}", level)
        }
        Some(Ok(AdminTaskResponse::DomainShow { domain_info })) => {
            let ProtoDomainInfo {
                name,
                displayname,
                uuid,
                level,
            } = domain_info;

            info!("domain_name : {}", name);
            info!("domain_display: {}", displayname);
            info!("domain_uuid : {}", uuid);
            info!("domain_level : {}", level);
        }
        Some(Ok(AdminTaskResponse::Success)) => info!("success"),
        Some(Ok(AdminTaskResponse::Error)) => {
            info!("Error - you should inspect the logs.");
            return ExitCode::FAILURE;
        }
        Some(Err(err)) => {
            error!(?err, "Error during admin task operation");
            return ExitCode::FAILURE;
        }
        None => {
            error!("Error making request to admin socket");
            return ExitCode::FAILURE;
        }
    };

    ExitCode::SUCCESS
}
320
/// Pre-flight identity and configuration-file permission checks (unix only;
/// a no-op on other targets).
///
/// Refuses to run (returns `Err(ExitCode::FAILURE)`) when the real and
/// effective uid or gid differ, which suggests a setuid/setgid launch.
/// Root, world-readable config, or config owned by the running uid only
/// produce warnings.
fn check_file_ownership(opt: &KanidmdParser) -> Result<(), ExitCode> {
    #[cfg(target_family = "unix")]
    let (cuid, ceuid) = {
        let cuid = get_current_uid();
        let ceuid = get_effective_uid();
        let cgid = get_current_gid();
        let cegid = get_effective_gid();

        if cuid == 0 || ceuid == 0 || cgid == 0 || cegid == 0 {
            warn!("This is running as uid == 0 (root) which may be a security risk.");
        }

        if cuid != ceuid || cgid != cegid {
            error!("{} != {} || {} != {}", cuid, ceuid, cgid, cegid);
            error!("Refusing to run - uid and euid OR gid and egid must be consistent.");
            return Err(ExitCode::FAILURE);
        }
        (cuid, ceuid)
    };

    if let Some(cfg_path) = &opt.config_path {
        #[cfg(target_family = "unix")]
        {
            if let Some(cfg_meta) = match metadata(cfg_path) {
                Ok(m) => Some(m),
                Err(e) => {
                    error!(
                        "Unable to read metadata for configuration file '{}' - {:?}",
                        cfg_path.display(),
                        e
                    );
                    // Non-fatal: config loading later will surface the
                    // underlying error; we just skip the permission checks.
                    None
                }
            } {
                if !kanidm_lib_file_permissions::readonly(&cfg_meta) {
                    warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...",
                    cfg_path.to_str().unwrap_or("invalid file path"));
                }

                // Any "other" rwx bits make the config visible to every
                // local user.
                if cfg_meta.mode() & 0o007 != 0 {
                    warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...",
                    cfg_path.to_str().unwrap_or("invalid file path")
                    );
                }

                // Owned by the running uid means the process could alter its
                // own config permissions.
                if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid {
                    warn!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...",
                    cfg_path.to_str().unwrap_or("invalid file path")
                    );
                }
            }
        }
    }
    Ok(())
}
381
/// Dispatch a `ScriptingCommand` - automation-oriented subcommands whose
/// results are emitted as JSON on stdout (via the `display_json_*` helpers)
/// rather than human readable logs.
async fn scripting_command(cmd: ScriptingCommand, config: Configuration) -> ExitCode {
    match cmd {
        ScriptingCommand::RecoverAccount { name } => {
            submit_admin_req_json(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RecoverAccount {
                    name: name.to_owned(),
                },
            )
            .await;
        }

        ScriptingCommand::Backup { path } => {
            backup_server_core(&config, path.as_deref());
        }

        ScriptingCommand::Reload => {
            submit_admin_req_json(config.adminbindpath.as_str(), AdminTaskRequest::Reload).await;
        }

        ScriptingCommand::HealthCheck {
            verify_tls,
            check_origin,
        } => {
            // Either probe the configured public origin, or the first local
            // bind address directly (rewriting the any-address to localhost).
            let healthcheck_url = match check_origin {
                true => format!("{}/status", config.origin),
                false => {
                    format!(
                        "https://{}/status",
                        config.address[0].replace("[::]", "localhost")
                    )
                }
            };

            let mut client = reqwest::ClientBuilder::new()
                .danger_accept_invalid_certs(!verify_tls)
                .danger_accept_invalid_hostnames(!verify_tls)
                .https_only(true);

            // If the server is configured with TLS, trust its chain file so
            // the healthcheck can validate the presented certificate.
            client = match &config.tls_config {
                None => client,
                Some(tls_config) => {
                    let ca_cert_path = tls_config.chain.clone();
                    match ca_cert_path.exists() {
                        true => {
                            let mut cert_buf = Vec::new();
                            if let Err(err) = std::fs::File::open(&ca_cert_path)
                                .and_then(|mut file| file.read_to_end(&mut cert_buf))
                            {
                                display_json_error_context(
                                    err,
                                    "Failed to read from filesystem.",
                                    ca_cert_path.display(),
                                );

                                return ExitCode::FAILURE;
                            }

                            let ca_chain_parsed =
                                match reqwest::Certificate::from_pem_bundle(&cert_buf) {
                                    Ok(val) => val,
                                    Err(err) => {
                                        display_json_error_context(
                                            err,
                                            "Failed to parse into ca_chain.",
                                            ca_cert_path.display(),
                                        );

                                        return ExitCode::FAILURE;
                                    }
                                };

                            // NOTE(review): the first certificate in the
                            // bundle is skipped - presumably the server's own
                            // leaf cert, leaving only the CA certs to trust.
                            // TODO confirm against how the chain file is built.
                            for cert in ca_chain_parsed.into_iter().skip(1) {
                                client = client.add_root_certificate(cert)
                            }
                            client
                        }
                        false => {
                            display_json_error_context(
                                "NoSuchFile",
                                "Requested ca file does not exist.",
                                ca_cert_path.display(),
                            );

                            return ExitCode::FAILURE;
                        }
                    }
                }
            };
            // Client build failure is treated as a bug here, hence the
            // explicitly allowed unwrap.
            #[allow(clippy::unwrap_used)]
            let client = client.build().unwrap();

            // Only reachability matters; the response body is discarded.
            let _ = match client.get(&healthcheck_url).send().await {
                Ok(val) => val,
                Err(error) => {
                    let error_message = {
                        if error.is_timeout() {
                            format!("Timeout connecting to url={healthcheck_url}")
                        } else if error.is_connect() {
                            format!("Connection failed: {error}")
                        } else {
                            format!("Failed to complete healthcheck: {error:?}")
                        }
                    };

                    display_json_error("HealthcheckFailed", error_message);

                    return ExitCode::FAILURE;
                }
            };
            display_json_success();
        }
    }

    ExitCode::SUCCESS
}
501
/// Bring the daemon up: start the logging/otel pipeline, run pre-flight
/// checks (uid/gid sanity, db folder existence and permissions), acquire the
/// kanidmd exclusive lock where the subcommand requires it, then hand over
/// to [`kanidm_main`]. Returns that run's exit code after best-effort lock
/// cleanup.
async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
    let (provider, logging_subscriber) = match sketching::pipeline::start_logging_pipeline(
        &config.otel_grpc_url,
        config.log_level,
    ) {
        Err(err) => {
            eprintln!("Error starting logger - {err:} - Bailing on startup!");
            return ExitCode::FAILURE;
        }
        Ok(val) => val,
    };

    if let Err(err) = tracing::subscriber::set_global_default(logging_subscriber).map_err(|err| {
        eprintln!("Error starting logger - {err:} - Bailing on startup!");
        ExitCode::FAILURE
    }) {
        return err;
    };

    info!(version = %env!("KANIDM_PKG_VERSION"), "Starting Kanidmd");

    // Holds the tracing/otel pipeline open for the lifetime of the daemon.
    let _otelguard = TracingPipelineGuard(provider);

    if let Err(err) = check_file_ownership(&opt) {
        return err;
    };

    // Sanity-check the database folder up front so startup failures are
    // diagnosable before the server core attempts to open the db.
    if let Some(db_path) = config.db_path.as_ref() {
        let db_pathbuf = db_path.to_path_buf();
        if let Some(db_parent_path) = db_pathbuf.parent() {
            if !db_parent_path.exists() {
                warn!(
                    "DB folder {} may not exist, server startup may FAIL!",
                    db_parent_path.to_str().unwrap_or("invalid file path")
                );
                let diag = kanidm_lib_file_permissions::diagnose_path(&db_pathbuf);
                info!(%diag);
            }

            let db_par_path_buf = db_parent_path.to_path_buf();
            let i_meta = match metadata(&db_par_path_buf) {
                Ok(m) => m,
                Err(e) => {
                    error!(
                        "Unable to read metadata for database folder '{}' - {:?}",
                        &db_par_path_buf.to_str().unwrap_or("invalid file path"),
                        e
                    );
                    return ExitCode::FAILURE;
                }
            };
            if !i_meta.is_dir() {
                error!(
                    "ERROR: Refusing to run - DB folder {} may not be a directory",
                    db_par_path_buf.to_str().unwrap_or("invalid file path")
                );
                return ExitCode::FAILURE;
            }

            if kanidm_lib_file_permissions::readonly(&i_meta) {
                warn!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", db_par_path_buf.to_str().unwrap_or("invalid file path"));
            }
            #[cfg(not(target_os = "windows"))]
            if i_meta.mode() & 0o007 != 0 {
                warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path"));
            }
        }
    } else {
        error!("No db_path set in configuration, server startup will FAIL!");
        return ExitCode::FAILURE;
    }

    // Commands that only proxy to an already-running server over the admin
    // socket skip the exclusive lock; everything else takes it so two
    // kanidmd processes cannot open the same database concurrently.
    let lock_was_setup = match &opt.commands {
        KanidmdOpt::ShowReplicationCertificate
        | KanidmdOpt::RenewReplicationCertificate
        | KanidmdOpt::RefreshReplicationConsumer { .. }
        | KanidmdOpt::RecoverAccount { .. }
        | KanidmdOpt::DisableAccount { .. } => None,
        _ => {
            // Lock file lives next to the db (".klock"), or in the temp dir
            // when no db path is configured.
            #[allow(clippy::expect_used)]
            let klock_path = match config.db_path.clone() {
                Some(val) => val.with_extension("klock"),
                None => std::env::temp_dir().join("kanidmd.klock"),
            };

            let flock = match File::create(&klock_path) {
                Ok(flock) => flock,
                Err(err) => {
                    error!(
                        "ERROR: Refusing to start - unable to create kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!(?err);
                    return ExitCode::FAILURE;
                }
            };

            // try_lock_exclusive returns Ok(false) when the lock is already
            // held by another process.
            match flock.try_lock_exclusive() {
                Ok(true) => debug!("Acquired kanidm exclusive lock"),
                Ok(false) => {
                    error!(
                        "ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!("Is another kanidmd process running?");
                    return ExitCode::FAILURE;
                }
                Err(err) => {
                    error!(
                        "ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!(?err);
                    return ExitCode::FAILURE;
                }
            };

            Some(klock_path)
        }
    };

    let result_code = kanidm_main(config, opt).await;

    // Best-effort removal of the lock file after the run completes.
    if let Some(klock_path) = lock_was_setup {
        if let Err(reason) = std::fs::remove_file(&klock_path) {
            warn!(
                ?reason,
                "WARNING: Unable to clean up kanidmd exclusive lock at {}",
                klock_path.display()
            );
        }
    }

    result_code
}
652
/// Process entrypoint: hardens the process (linux release builds), parses
/// the CLI, resolves and loads configuration, builds the tokio runtime, and
/// dispatches to either the scripting path or the daemon path.
fn main() -> ExitCode {
    // Disable core dumps in release builds so in-memory secrets cannot leak
    // through a crash dump.
    #[cfg(all(target_os = "linux", not(debug_assertions)))]
    if let Err(code) = prctl::set_dumpable(false) {
        println!(
            "CRITICAL: Unable to set prctl flags, which breaches our security model, quitting! {:?}", code
        );
        return ExitCode::FAILURE;
    }

    // Heap profiler guard - must remain alive for the whole process.
    #[cfg(feature = "dhat-heap")]
    let _profiler = dhat::Profiler::builder().trim_backtraces(Some(40)).build();

    let opt = KanidmdParser::parse();

    // Version can be answered without configuration or a runtime.
    if let KanidmdOpt::Version = &opt.commands {
        println!("kanidmd {}", env!("KANIDM_PKG_VERSION"));
        return ExitCode::SUCCESS;
    };

    // KANIDM_SERVER_CONFIG_PATH is baked in at build time; an empty value
    // means the build itself is broken.
    if env!("KANIDM_SERVER_CONFIG_PATH").is_empty() {
        eprintln!("CRITICAL: Kanidmd was not built correctly and is missing a valid KANIDM_SERVER_CONFIG_PATH value");
        return ExitCode::FAILURE;
    }

    let default_config_path = PathBuf::from(env!("KANIDM_SERVER_CONFIG_PATH"));

    // CLI-provided path wins; otherwise use the build-time default when it
    // exists on disk.
    let maybe_config_path = if let Some(p) = &opt.config_path {
        Some(p.clone())
    } else {
        if default_config_path.exists() {
            Some(default_config_path)
        } else {
            None
        }
    };

    let maybe_sconfig = if let Some(config_path) = maybe_config_path {
        match ServerConfigUntagged::new(config_path) {
            Ok(c) => Some(c),
            Err(err) => {
                eprintln!("ERROR: Configuration Parse Failure: {err:?}");
                return ExitCode::FAILURE;
            }
        }
    } else {
        eprintln!("WARNING: No configuration path was provided, relying on environment variables.");
        None
    };

    let is_server = matches!(&opt.commands, KanidmdOpt::Server);

    // Layer the toml config (if any) under the CLI options.
    let config = Configuration::build()
        .add_opt_toml_config(maybe_sconfig)
        .add_cli_config(&opt.kanidmd_options)
        .is_server_mode(is_server)
        .finish();

    let Some(config) = config else {
        eprintln!(
            "ERROR: Unable to build server configuration from provided configuration inputs."
        );
        return ExitCode::FAILURE;
    };

    #[cfg(target_family = "windows")]
    get_user_details_windows();

    let maybe_rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(config.threads)
        .enable_all()
        .thread_name("kanidmd-thread-pool")
        .build();

    let rt = match maybe_rt {
        Ok(rt) => rt,
        Err(err) => {
            eprintln!("CRITICAL: Unable to start runtime! {err:?}");
            return ExitCode::FAILURE;
        }
    };

    // Scripting commands emit JSON and skip daemon pre-flight; everything
    // else goes through start_daemon.
    if let KanidmdOpt::Scripting { command } = opt.commands {
        rt.block_on(scripting_command(command, config))
    } else {
        rt.block_on(start_daemon(opt, config))
    }
}
762
/// Execute the parsed subcommand against the built configuration.
///
/// `Server`/`ConfigTest` run (or validate) the full server core, including
/// TLS material permission checks and the unix signal loop. The Database,
/// DbScan and rename/vacuum DomainSettings commands run one-shot core
/// routines directly; the replication, account and remaining DomainSettings
/// commands proxy to a running instance over the admin socket.
async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
    match &opt.commands {
        KanidmdOpt::Server | KanidmdOpt::ConfigTest => {
            let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest);
            if config_test {
                info!("Running in server configuration test mode ...");
            } else {
                info!("Running in server mode ...");
            };

            // Check TLS chain/key/client-CA permissions before starting, so
            // insecure or unreadable material is reported up front.
            if let Some(tls_config) = config.tls_config.as_ref() {
                {
                    let i_meta = match metadata(&tls_config.chain) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for TLS chain file '{}' - {:?}",
                                tls_config.chain.display(),
                                e
                            );
                            let diag =
                                kanidm_lib_file_permissions::diagnose_path(&tls_config.chain);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.chain.display());
                    }
                }

                {
                    let i_meta = match metadata(&tls_config.key) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for TLS key file '{}' - {:?}",
                                tls_config.key.display(),
                                e
                            );
                            let diag = kanidm_lib_file_permissions::diagnose_path(&tls_config.key);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.key.display());
                    }
                    // Private key must never be world accessible.
                    #[cfg(not(target_os = "windows"))]
                    if i_meta.mode() & 0o007 != 0 {
                        warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", tls_config.key.display());
                    }
                }

                if let Some(ca_dir) = tls_config.client_ca.as_ref() {
                    let ca_dir_path = PathBuf::from(&ca_dir);
                    if !ca_dir_path.exists() {
                        error!(
                            "TLS CA folder {} does not exist, server startup will FAIL!",
                            ca_dir.display()
                        );
                        let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                        info!(%diag);
                    }

                    let i_meta = match metadata(&ca_dir_path) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for '{}' - {:?}",
                                ca_dir.display(),
                                e
                            );
                            let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !i_meta.is_dir() {
                        error!(
                            "ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
                            ca_dir.display()
                        );
                        return ExitCode::FAILURE;
                    }
                    if kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir.display());
                    }
                    #[cfg(not(target_os = "windows"))]
                    if i_meta.mode() & 0o007 != 0 {
                        warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir.display());
                    }
                }
            }

            let sctx = create_server_core(config, config_test).await;
            // NOTE(review): in config-test mode the create_server_core result
            // is dropped unchecked and we fall through to SUCCESS - confirm
            // this is the intended "validation happens via logs" behaviour.
            if !config_test {
                // Notify systemd that startup is complete.
                // SAFETY(review): notify_and_unset_env mutates process
                // environment variables; presumed sound here as startup is
                // single-point at this moment - TODO confirm against the
                // sd-notify crate's safety contract.
                #[cfg(target_os = "linux")]
                unsafe {
                    let _ = sd_notify::notify_and_unset_env(&[sd_notify::NotifyState::Ready]);
                    let _ = sd_notify::notify_and_unset_env(&[sd_notify::NotifyState::Status(
                        "Started Kanidm 🦀",
                    )]);
                }

                match sctx {
                    Ok(mut sctx) => {
                        // Signal loop: run until ctrl-c/SIGTERM or a core
                        // Shutdown broadcast; SIGHUP triggers a live reload.
                        loop {
                            #[cfg(target_family = "unix")]
                            {
                                let mut listener = sctx.subscribe();
                                tokio::select! {
                                    Ok(()) = tokio::signal::ctrl_c() => {
                                        break
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::terminate();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        break
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::alarm();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // SIGALRM - deliberately ignored.
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::hangup();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // SIGHUP - trigger a live reload of the server core.
                                        sctx.reload().await;
                                        info!("Reload complete");
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::user_defined1();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // SIGUSR1 - deliberately ignored.
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::user_defined2();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // SIGUSR2 - deliberately ignored.
                                    }
                                    Ok(msg) = async move {
                                        listener.recv().await
                                    } =>
                                        match msg {
                                            CoreAction::Shutdown => break,
                                            CoreAction::Reload => {}
                                        },
                                }
                            }
                            #[cfg(target_family = "windows")]
                            {
                                // Windows: only ctrl-c is handled.
                                tokio::select! {
                                    Ok(()) = tokio::signal::ctrl_c() => {
                                        break
                                    }
                                }
                            }
                        }
                        info!("Signal received, shutting down");
                        sctx.shutdown().await;
                    }
                    Err(_) => {
                        error!("Failed to start server core!");
                        return ExitCode::FAILURE;
                    }
                }
                info!("Stopped 🛑 ");
            }
        }
        KanidmdOpt::CertGenerate => {
            info!("Running in certificate generate mode ...");
            cert_generate_core(&config);
        }
        KanidmdOpt::Database {
            commands: DbCommands::Backup(bopt),
        } => {
            info!("Running in backup mode ...");

            backup_server_core(&config, Some(&bopt.path));
        }
        KanidmdOpt::Database {
            commands: DbCommands::Restore(ropt),
        } => {
            info!("Running in restore mode ...");
            restore_server_core(&config, &ropt.path).await;
        }
        KanidmdOpt::Database {
            commands: DbCommands::Verify,
        } => {
            info!("Running in db verification mode ...");
            verify_server_core(&config).await;
        }
        KanidmdOpt::ShowReplicationCertificate => {
            info!("Running show replication certificate ...");
            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::ShowReplicationCertificate,
            )
            .await;
        }
        KanidmdOpt::ShowReplicationCertificateMetadata => {
            info!("Running show replication certificate metadata ...");
            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::ShowReplicationCertificateMetadata,
            )
            .await;
        }

        KanidmdOpt::RenewReplicationCertificate => {
            info!("Running renew replication certificate ...");
            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RenewReplicationCertificate,
            )
            .await;
        }
        KanidmdOpt::RefreshReplicationConsumer { proceed } => {
            info!("Running refresh replication consumer ...");
            // Destructive operation - requires an explicit opt-in flag.
            if !proceed {
                error!("Unwilling to proceed. Check --help.");
            } else {
                submit_admin_req_human(
                    config.adminbindpath.as_str(),
                    AdminTaskRequest::RefreshReplicationConsumer,
                )
                .await;
            }
        }
        KanidmdOpt::RecoverAccount { name } => {
            info!("Running account recovery ...");

            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RecoverAccount {
                    name: name.to_owned(),
                },
            )
            .await;
        }
        KanidmdOpt::DisableAccount { name } => {
            info!("Running account disable ...");

            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DisableAccount {
                    name: name.to_owned(),
                },
            )
            .await;
        }
        KanidmdOpt::Database {
            commands: DbCommands::Reindex,
        } => {
            info!("Running in reindex mode ...");
            reindex_server_core(&config).await;
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndexes,
        } => {
            info!("👀 db scan - list indexes");
            dbscan_list_indexes_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListId2Entry,
        } => {
            info!("👀 db scan - list id2entry");
            dbscan_list_id2entry_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndexAnalysis,
        } => {
            info!("👀 db scan - list index analysis");
            dbscan_list_index_analysis_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndex(dopt),
        } => {
            info!("👀 db scan - list index content - {}", dopt.index_name);
            dbscan_list_index_core(&config, dopt.index_name.as_str());
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::GetId2Entry(dopt),
        } => {
            info!("👀 db scan - get id2 entry - {}", dopt.id);
            dbscan_get_id2entry_core(&config, dopt.id);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::QuarantineId2Entry { id },
        } => {
            info!("☣️ db scan - quarantine id2 entry - {}", id);
            dbscan_quarantine_id2entry_core(&config, *id);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListQuarantined,
        } => {
            info!("☣️ db scan - list quarantined");
            dbscan_list_quarantined_core(&config);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::RestoreQuarantined { id },
        } => {
            info!("☣️ db scan - restore quarantined entry - {}", id);
            dbscan_restore_quarantined_core(&config, *id);
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Change,
        } => {
            info!("Running in domain name change mode ... this may take a long time ...");
            domain_rename_core(&config).await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Show,
        } => {
            info!("Running domain show ...");

            submit_admin_req_human(config.adminbindpath.as_str(), AdminTaskRequest::DomainShow)
                .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::UpgradeCheck,
        } => {
            info!("Running domain upgrade check ...");

            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainUpgradeCheck,
            )
            .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Raise,
        } => {
            info!("Running domain raise ...");

            submit_admin_req_human(config.adminbindpath.as_str(), AdminTaskRequest::DomainRaise)
                .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Remigrate { level },
        } => {
            info!("⚠️ Running domain remigrate ...");

            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainRemigrate { level: *level },
            )
            .await;
        }

        KanidmdOpt::Database {
            commands: DbCommands::Vacuum,
        } => {
            info!("Running in vacuum mode ...");
            vacuum_server_core(&config);
        }
        // Handled before kanidm_main is reached (see main()).
        KanidmdOpt::Scripting { .. } | KanidmdOpt::Version => {}
    }
    ExitCode::SUCCESS
}