#![deny(warnings)]
#![warn(unused_extern_crates)]
#![deny(clippy::todo)]
#![deny(clippy::unimplemented)]
#![deny(clippy::unwrap_used)]
#![deny(clippy::expect_used)]
#![deny(clippy::panic)]
#![deny(clippy::unreachable)]
#![deny(clippy::await_holding_lock)]
#![deny(clippy::needless_pass_by_value)]
#![deny(clippy::trivially_copy_pass_by_ref)]

#[cfg(all(not(feature = "dhat-heap"), target_os = "linux"))]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

#[cfg(feature = "dhat-heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;

#[cfg(target_family = "unix")]
use std::os::unix::fs::MetadataExt;

#[cfg(target_family = "unix")]
use kanidm_utils_users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};

#[cfg(target_family = "windows")]
use whoami;

use std::fs::{metadata, File};
use clap::{Args, Parser, Subcommand};
use fs4::fs_std::FileExt;
use futures::{SinkExt, StreamExt};
use kanidmd_core::admin::{
    AdminTaskRequest, AdminTaskResponse, ClientCodec, ProtoDomainInfo,
    ProtoDomainUpgradeCheckReport, ProtoDomainUpgradeCheckStatus,
};
use kanidmd_core::config::{Configuration, ServerConfigUntagged};
use kanidmd_core::{
    backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core,
    dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core,
    dbscan_list_indexes_core, dbscan_list_quarantined_core, dbscan_quarantine_id2entry_core,
    dbscan_restore_quarantined_core, domain_rename_core, reindex_server_core, restore_server_core,
    vacuum_server_core, verify_server_core, CoreAction,
};
use serde::Serialize;
use sketching::pipeline::TracingPipelineGuard;
use sketching::tracing_forest::util::*;
use std::fmt;
use std::io::Read;
use std::path::PathBuf;
use std::process::ExitCode;
use tokio::net::UnixStream;
use tokio_util::codec::Framed;

include!("./opt.rs");

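/// Report the current Windows username on stderr. There is no uid/gid model to
/// inspect on Windows, so this is purely a diagnostic aid at startup.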
#[cfg(target_family = "windows")]
fn get_user_details_windows() {
    eprintln!(
        "Running on windows, current username is: {:?}",
        whoami::username()
    );
}

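/// Print a machine-readable `{"status": "ok"}` JSON object to stdout.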
fn display_json_success() {
    let json_output = serde_json::json!({
        "status": "ok",
    });
    println!("{json_output}");
}

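/// Print a JSON success object to stdout that also carries the serialised task
/// output under the `output` key.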
fn display_json_success_output<T: Serialize>(data: T) {
    let json_output = serde_json::json!({
        "status": "ok",
        "output": data,
    });
    println!("{json_output}");
}

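/// Print a JSON error object to stdout containing the failure reason and a
/// human-readable message.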
fn display_json_error<E, M>(error: E, message: M)
where
    E: fmt::Display,
    M: fmt::Display,
{
    let json_output = serde_json::json!({
        "status": "error",
        "reason": format!("{error}"),
        "message": format!("{message}")
    });
    println!("{json_output}");
}

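/// As [`display_json_error`], but with an extra `context` field - for example
/// the path that was being accessed when the error occurred.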
fn display_json_error_context<E, M, C>(error: E, message: M, context: C)
where
    E: fmt::Display,
    M: fmt::Display,
    C: fmt::Display,
{
    let json_output = serde_json::json!({
        "status": "error",
        "reason": format!("{error}"),
        "message": format!("{message}"),
        "context": format!("{context}"),
    });
    println!("{json_output}");
}

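/// Connect to the admin unix socket at `path`, submit `req`, and render the
/// response as JSON on stdout. Used by the `scripting` subcommands, where the
/// output must be machine parseable.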
async fn submit_admin_req_json(path: &str, req: AdminTaskRequest) -> ExitCode {
    let stream = match UnixStream::connect(path).await {
        Ok(s) => s,
        Err(err) => {
            display_json_error(err, "Unable to connect to socket path.");

            return ExitCode::FAILURE;
        }
    };

    let mut reqs = Framed::new(stream, ClientCodec);

    if let Err(err) = reqs.send(req).await {
        display_json_error(err, "Unable to send request.");

        return ExitCode::FAILURE;
    };

    if let Err(err) = reqs.flush().await {
        display_json_error(err, "Unable to flush request.");

        return ExitCode::FAILURE;
    }

    match reqs.next().await {
        Some(Ok(AdminTaskResponse::RecoverAccount { password })) => {
            display_json_success_output(password)
        }
        Some(Ok(AdminTaskResponse::Success)) => {
            display_json_success();
        }
        Some(Ok(AdminTaskResponse::Error)) => {
            display_json_error(
                "ResponseError",
                "Error processing request - you should inspect the server logs.",
            );
            return ExitCode::FAILURE;
        }
        Some(Err(err)) => {
            display_json_error(err, "Error during admin task operation.");
            return ExitCode::FAILURE;
        }
        None => {
            display_json_error("SocketClosed", "Error making request to admin socket.");
            return ExitCode::FAILURE;
        }

        _ => {}
    }

    ExitCode::SUCCESS
}

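/// Connect to the admin unix socket at `path`, submit `req`, and log the
/// response through tracing for a human operator to read.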
async fn submit_admin_req_human(path: &str, req: AdminTaskRequest) -> ExitCode {
    let stream = match UnixStream::connect(path).await {
        Ok(s) => s,
        Err(e) => {
            error!(err = ?e, %path, "Unable to connect to socket path");
            let diag = kanidm_lib_file_permissions::diagnose_path(path.as_ref());
            info!(%diag);
            return ExitCode::FAILURE;
        }
    };

    let mut reqs = Framed::new(stream, ClientCodec);

    if let Err(e) = reqs.send(req).await {
        error!(err = ?e, "Unable to send request");
        return ExitCode::FAILURE;
    };

    if let Err(e) = reqs.flush().await {
        error!(err = ?e, "Unable to flush request");
        return ExitCode::FAILURE;
    }

    trace!("flushed, waiting ...");

    match reqs.next().await {
        Some(Ok(AdminTaskResponse::RecoverAccount { password })) => info!(new_password = ?password),
        Some(Ok(AdminTaskResponse::ShowReplicationCertificate { cert })) => {
            info!(certificate = ?cert)
        }
        Some(Ok(AdminTaskResponse::DomainUpgradeCheck { report })) => {
            let ProtoDomainUpgradeCheckReport {
                name,
                uuid,
                current_level,
                upgrade_level,
                report_items,
            } = report;

            info!("domain_name : {}", name);
            info!("domain_uuid : {}", uuid);
            info!("domain_current_level : {}", current_level);
            info!("domain_upgrade_level : {}", upgrade_level);

            if report_items.is_empty() {
                info!("------------------------");
                info!("status : PASS");
                return ExitCode::SUCCESS;
            }

            for item in report_items {
                info!("------------------------");
                match item.status {
                    ProtoDomainUpgradeCheckStatus::Pass6To7Gidnumber => {
                        info!("upgrade_item : gidnumber range validity");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : PASS");
                    }
                    ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber => {
                        info!("upgrade_item : gidnumber range validity");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : FAIL");
                        info!("description : The automatically allocated gidnumbers for posix accounts were found to fall within systemd-reserved ranges. These can no longer be used.");
                        info!("action : Modify the gidnumber of affected entries so that they are in the range 65536 to 524287 OR reset the gidnumber to cause it to automatically regenerate.");
                        for entry_id in item.affected_entries {
                            info!("affected_entry : {}", entry_id);
                        }
                    }
                    ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys => {
                        info!("upgrade_item : security key usage");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : PASS");
                    }
                    ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys => {
                        info!("upgrade_item : security key usage");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : FAIL");
                        info!("description : Security keys no longer function as a second factor due to the introduction of CTAP2 and later standards, which force PIN interactions.");
                        info!("action : Modify the accounts in question to remove their security key and re-add it as a passkey, or enable TOTP.");
                        for entry_id in item.affected_entries {
                            info!("affected_entry : {}", entry_id);
                        }
                    }
                    ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri => {
                        info!("upgrade_item : oauth2 strict redirect uri enforcement");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : PASS");
                    }
                    ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri => {
                        info!("upgrade_item : oauth2 strict redirect uri enforcement");
                        debug!("from_level : {}", item.from_level);
                        debug!("to_level : {}", item.to_level);
                        info!("status : FAIL");
                        info!("description : To harden against possible public client open redirection vulnerabilities, redirect uris must now be registered ahead of time and are validated, rather than relying on the former origin verification process.");
                        info!("action : Verify the redirect uris for OAuth2 clients and then enable strict-redirect-uri on each client.");
                        for entry_id in item.affected_entries {
                            info!("affected_entry : {}", entry_id);
                        }
                    }
                }
            }
        }
        Some(Ok(AdminTaskResponse::DomainRaise { level })) => {
            info!("success - raised domain level to {}", level)
        }
        Some(Ok(AdminTaskResponse::DomainShow { domain_info })) => {
            let ProtoDomainInfo {
                name,
                displayname,
                uuid,
                level,
            } = domain_info;

            info!("domain_name : {}", name);
            info!("domain_display: {}", displayname);
            info!("domain_uuid : {}", uuid);
            info!("domain_level : {}", level);
        }
        Some(Ok(AdminTaskResponse::Success)) => info!("success"),
        Some(Ok(AdminTaskResponse::Error)) => {
            info!("Error - you should inspect the logs.");
            return ExitCode::FAILURE;
        }
        Some(Err(err)) => {
            error!(?err, "Error during admin task operation");
            return ExitCode::FAILURE;
        }
        None => {
            error!("Error making request to admin socket");
            return ExitCode::FAILURE;
        }
    };

    ExitCode::SUCCESS
}

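/// Perform unix uid/gid consistency checks and warn about insecure ownership
/// or permissions on the configuration file before the server starts.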
fn check_file_ownership(opt: &KanidmdParser) -> Result<(), ExitCode> {
    #[cfg(target_family = "unix")]
    let (cuid, ceuid) = {
        let cuid = get_current_uid();
        let ceuid = get_effective_uid();
        let cgid = get_current_gid();
        let cegid = get_effective_gid();

        if cuid == 0 || ceuid == 0 || cgid == 0 || cegid == 0 {
            warn!("This is running as uid == 0 (root) which may be a security risk.");
        }

        if cuid != ceuid || cgid != cegid {
            error!("{} != {} || {} != {}", cuid, ceuid, cgid, cegid);
            error!("Refusing to run - uid and euid OR gid and egid must be consistent.");
            return Err(ExitCode::FAILURE);
        }
        (cuid, ceuid)
    };

    if let Some(cfg_path) = &opt.config_path {
        #[cfg(target_family = "unix")]
        {
            if let Some(cfg_meta) = match metadata(cfg_path) {
                Ok(m) => Some(m),
                Err(e) => {
                    error!(
                        "Unable to read metadata for configuration file '{}' - {:?}",
                        cfg_path.display(),
                        e
                    );
                    None
                }
            } {
                if !kanidm_lib_file_permissions::readonly(&cfg_meta) {
                    warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path"));
                }

                if cfg_meta.mode() & 0o007 != 0 {
                    warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path")
                    );
                }

                if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid {
                    warn!("WARNING: {} is owned by the current uid, which may allow file permission changes. This could be a security risk ...",
                        cfg_path.to_str().unwrap_or("invalid file path")
                    );
                }
            }
        }
    }
    Ok(())
}

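/// Dispatch the `scripting` subcommands (account recovery, backup, reload and
/// healthcheck). These paths emit JSON on stdout rather than human-oriented
/// logs.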
async fn scripting_command(cmd: ScriptingCommand, config: Configuration) -> ExitCode {
    match cmd {
        ScriptingCommand::RecoverAccount { name } => {
            submit_admin_req_json(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RecoverAccount {
                    name: name.to_owned(),
                },
            )
            .await;
        }

        ScriptingCommand::Backup { path } => {
            backup_server_core(&config, path.as_deref());
        }

        ScriptingCommand::Reload => {
            submit_admin_req_json(config.adminbindpath.as_str(), AdminTaskRequest::Reload).await;
        }

        ScriptingCommand::HealthCheck {
            verify_tls,
            check_origin,
        } => {
            let healthcheck_url = match check_origin {
                true => format!("{}/status", config.origin),
                false => {
                    format!(
                        "https://{}/status",
                        config.address[0].replace("[::]", "localhost")
                    )
                }
            };

            let mut client = reqwest::ClientBuilder::new()
                .danger_accept_invalid_certs(!verify_tls)
                .danger_accept_invalid_hostnames(!verify_tls)
                .https_only(true);

            client = match &config.tls_config {
                None => client,
                Some(tls_config) => {
                    let ca_cert_path = tls_config.chain.clone();
                    match ca_cert_path.exists() {
                        true => {
                            let mut cert_buf = Vec::new();
                            if let Err(err) = std::fs::File::open(&ca_cert_path)
                                .and_then(|mut file| file.read_to_end(&mut cert_buf))
                            {
                                display_json_error_context(
                                    err,
                                    "Failed to read from filesystem.",
                                    ca_cert_path.display(),
                                );

                                return ExitCode::FAILURE;
                            }

                            let ca_chain_parsed =
                                match reqwest::Certificate::from_pem_bundle(&cert_buf) {
                                    Ok(val) => val,
                                    Err(err) => {
                                        display_json_error_context(
                                            err,
                                            "Failed to parse into ca_chain.",
                                            ca_cert_path.display(),
                                        );

                                        return ExitCode::FAILURE;
                                    }
                                };

                            for cert in ca_chain_parsed.into_iter().skip(1) {
                                client = client.add_root_certificate(cert)
                            }
                            client
                        }
                        false => {
                            display_json_error_context(
                                "NoSuchFile",
                                "Requested ca file does not exist.",
                                ca_cert_path.display(),
                            );

                            return ExitCode::FAILURE;
                        }
                    }
                }
            };
            #[allow(clippy::unwrap_used)]
            let client = client.build().unwrap();

            let _ = match client.get(&healthcheck_url).send().await {
                Ok(val) => val,
                Err(error) => {
                    let error_message = {
                        if error.is_timeout() {
                            format!("Timeout connecting to url={healthcheck_url}")
                        } else if error.is_connect() {
                            format!("Connection failed: {error}")
                        } else {
                            format!("Failed to complete healthcheck: {error:?}")
                        }
                    };

                    display_json_error("HealthcheckFailed", error_message);

                    return ExitCode::FAILURE;
                }
            };
            display_json_success();
        }
    }

    ExitCode::SUCCESS
}

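/// Start the logging pipeline, validate file ownership and the database
/// directory, take the kanidmd exclusive lock where required, then hand over
/// to [`kanidm_main`]. The lock file is removed once the server returns.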
async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
    let (provider, logging_subscriber) = match sketching::pipeline::start_logging_pipeline(
        &config.otel_grpc_url,
        config.log_level,
    ) {
        Err(err) => {
            eprintln!("Error starting logger - {err:} - Bailing on startup!");
            return ExitCode::FAILURE;
        }
        Ok(val) => val,
    };

    if let Err(err) = tracing::subscriber::set_global_default(logging_subscriber).map_err(|err| {
        eprintln!("Error starting logger - {err:} - Bailing on startup!");
        ExitCode::FAILURE
    }) {
        return err;
    };

    info!(version = %env!("KANIDM_PKG_VERSION"), "Starting Kanidmd");

    let _otelguard = TracingPipelineGuard(provider);

    if let Err(err) = check_file_ownership(&opt) {
        return err;
    };

    if let Some(db_path) = config.db_path.as_ref() {
        let db_pathbuf = db_path.to_path_buf();
        if let Some(db_parent_path) = db_pathbuf.parent() {
            if !db_parent_path.exists() {
                warn!(
                    "DB folder {} may not exist, server startup may FAIL!",
                    db_parent_path.to_str().unwrap_or("invalid file path")
                );
                let diag = kanidm_lib_file_permissions::diagnose_path(&db_pathbuf);
                info!(%diag);
            }

            let db_par_path_buf = db_parent_path.to_path_buf();
            let i_meta = match metadata(&db_par_path_buf) {
                Ok(m) => m,
                Err(e) => {
                    error!(
                        "Unable to read metadata for database folder '{}' - {:?}",
                        &db_par_path_buf.to_str().unwrap_or("invalid file path"),
                        e
                    );
                    return ExitCode::FAILURE;
                }
            };
            if !i_meta.is_dir() {
                error!(
                    "ERROR: Refusing to run - DB folder {} may not be a directory",
                    db_par_path_buf.to_str().unwrap_or("invalid file path")
                );
                return ExitCode::FAILURE;
            }

            if kanidm_lib_file_permissions::readonly(&i_meta) {
                warn!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", db_par_path_buf.to_str().unwrap_or("invalid file path"));
            }
            #[cfg(not(target_os = "windows"))]
            if i_meta.mode() & 0o007 != 0 {
                warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path"));
            }
        }
    } else {
        error!("No db_path set in configuration, server startup will FAIL!");
        return ExitCode::FAILURE;
    }

    let lock_was_setup = match &opt.commands {
        KanidmdOpt::ShowReplicationCertificate
        | KanidmdOpt::RenewReplicationCertificate
        | KanidmdOpt::RefreshReplicationConsumer { .. }
        | KanidmdOpt::RecoverAccount { .. }
        | KanidmdOpt::DisableAccount { .. } => None,
        _ => {
            #[allow(clippy::expect_used)]
            let klock_path = match config.db_path.clone() {
                Some(val) => val.with_extension("klock"),
                None => std::env::temp_dir().join("kanidmd.klock"),
            };

            let flock = match File::create(&klock_path) {
                Ok(flock) => flock,
                Err(err) => {
                    error!(
                        "ERROR: Refusing to start - unable to create kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!(?err);
                    return ExitCode::FAILURE;
                }
            };

            match flock.try_lock_exclusive() {
                Ok(true) => debug!("Acquired kanidm exclusive lock"),
                Ok(false) => {
                    error!(
                        "ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!("Is another kanidmd process running?");
                    return ExitCode::FAILURE;
                }
                Err(err) => {
                    error!(
                        "ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {}",
                        klock_path.display()
                    );
                    error!(?err);
                    return ExitCode::FAILURE;
                }
            };

            Some(klock_path)
        }
    };

    let result_code = kanidm_main(config, opt).await;

    if let Some(klock_path) = lock_was_setup {
        if let Err(reason) = std::fs::remove_file(&klock_path) {
            warn!(
                ?reason,
                "WARNING: Unable to clean up kanidmd exclusive lock at {}",
                klock_path.display()
            );
        }
    }

    result_code
}

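/// Process entry point. Hardens the process (disables core dumps on release
/// Linux builds), parses the CLI and configuration, builds the tokio runtime,
/// then dispatches to either the scripting path or the daemon path.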
fn main() -> ExitCode {
    #[cfg(all(target_os = "linux", not(debug_assertions)))]
    if let Err(code) = prctl::set_dumpable(false) {
        println!(
            "CRITICAL: Unable to set prctl flags, which breaches our security model, quitting! {:?}", code
        );
        return ExitCode::FAILURE;
    }

    #[cfg(feature = "dhat-heap")]
    let _profiler = dhat::Profiler::builder().trim_backtraces(Some(40)).build();

    let opt = KanidmdParser::parse();

    if let KanidmdOpt::Version = &opt.commands {
        println!("kanidmd {}", env!("KANIDM_PKG_VERSION"));
        return ExitCode::SUCCESS;
    };

    if env!("KANIDM_SERVER_CONFIG_PATH").is_empty() {
        eprintln!("CRITICAL: Kanidmd was not built correctly and is missing a valid KANIDM_SERVER_CONFIG_PATH value");
        return ExitCode::FAILURE;
    }

    let default_config_path = PathBuf::from(env!("KANIDM_SERVER_CONFIG_PATH"));

    let maybe_config_path = if let Some(p) = &opt.config_path {
        Some(p.clone())
    } else if default_config_path.exists() {
        Some(default_config_path)
    } else {
        None
    };

    let maybe_sconfig = if let Some(config_path) = maybe_config_path {
        match ServerConfigUntagged::new(config_path) {
            Ok(c) => Some(c),
            Err(err) => {
                eprintln!("ERROR: Configuration Parse Failure: {err:?}");
                return ExitCode::FAILURE;
            }
        }
    } else {
        eprintln!("WARNING: No configuration path was provided, relying on environment variables.");
        None
    };

    let is_server = matches!(&opt.commands, KanidmdOpt::Server);

    let config = Configuration::build()
        .add_opt_toml_config(maybe_sconfig)
        .add_cli_config(&opt.kanidmd_options)
        .is_server_mode(is_server)
        .finish();

    let Some(config) = config else {
        eprintln!(
            "ERROR: Unable to build server configuration from provided configuration inputs."
        );
        return ExitCode::FAILURE;
    };

    #[cfg(target_family = "windows")]
    get_user_details_windows();

    let maybe_rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(config.threads)
        .enable_all()
        .thread_name("kanidmd-thread-pool")
        .build();

    let rt = match maybe_rt {
        Ok(rt) => rt,
        Err(err) => {
            eprintln!("CRITICAL: Unable to start runtime! {err:?}");
            return ExitCode::FAILURE;
        }
    };

    if let KanidmdOpt::Scripting { command } = opt.commands {
        rt.block_on(scripting_command(command, config))
    } else {
        rt.block_on(start_daemon(opt, config))
    }
}

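/// Execute the selected kanidmd subcommand. For `server` / `configtest` this
/// validates TLS file permissions, starts the server core, then waits on unix
/// signals (SIGHUP reloads, SIGTERM or ctrl-c shuts down).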
async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
    match &opt.commands {
        KanidmdOpt::Server | KanidmdOpt::ConfigTest => {
            let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest);
            if config_test {
                info!("Running in server configuration test mode ...");
            } else {
                info!("Running in server mode ...");
            };

            if let Some(tls_config) = config.tls_config.as_ref() {
                {
                    let i_meta = match metadata(&tls_config.chain) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for TLS chain file '{}' - {:?}",
                                tls_config.chain.display(),
                                e
                            );
                            let diag =
                                kanidm_lib_file_permissions::diagnose_path(&tls_config.chain);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.chain.display());
                    }
                }

                {
                    let i_meta = match metadata(&tls_config.key) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for TLS key file '{}' - {:?}",
                                tls_config.key.display(),
                                e
                            );
                            let diag = kanidm_lib_file_permissions::diagnose_path(&tls_config.key);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.key.display());
                    }
                    #[cfg(not(target_os = "windows"))]
                    if i_meta.mode() & 0o007 != 0 {
                        warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", tls_config.key.display());
                    }
                }

                if let Some(ca_dir) = tls_config.client_ca.as_ref() {
                    let ca_dir_path = PathBuf::from(&ca_dir);
                    if !ca_dir_path.exists() {
                        error!(
                            "TLS CA folder {} does not exist, server startup will FAIL!",
                            ca_dir.display()
                        );
                        let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                        info!(%diag);
                    }

                    let i_meta = match metadata(&ca_dir_path) {
                        Ok(m) => m,
                        Err(e) => {
                            error!(
                                "Unable to read metadata for '{}' - {:?}",
                                ca_dir.display(),
                                e
                            );
                            let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                            info!(%diag);
                            return ExitCode::FAILURE;
                        }
                    };
                    if !i_meta.is_dir() {
                        error!(
                            "ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
                            ca_dir.display()
                        );
                        return ExitCode::FAILURE;
                    }
                    if kanidm_lib_file_permissions::readonly(&i_meta) {
                        warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir.display());
                    }
                    #[cfg(not(target_os = "windows"))]
                    if i_meta.mode() & 0o007 != 0 {
                        warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir.display());
                    }
                }
            }

            let sctx = create_server_core(config, config_test).await;
            if !config_test {
                #[cfg(target_os = "linux")]
                {
                    let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
                    let _ = sd_notify::notify(
                        true,
                        &[sd_notify::NotifyState::Status("Started Kanidm 🦀")],
                    );
                };

                match sctx {
                    Ok(mut sctx) => {
                        loop {
                            #[cfg(target_family = "unix")]
                            {
                                let mut listener = sctx.subscribe();
                                tokio::select! {
                                    Ok(()) = tokio::signal::ctrl_c() => {
                                        break
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::terminate();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        break
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::alarm();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore SIGALRM - handling it here prevents the default
                                        // action (process termination).
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::hangup();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        sctx.reload().await;
                                        info!("Reload complete");
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::user_defined1();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore SIGUSR1.
                                    }
                                    Some(()) = async move {
                                        let sigterm = tokio::signal::unix::SignalKind::user_defined2();
                                        #[allow(clippy::unwrap_used)]
                                        tokio::signal::unix::signal(sigterm).unwrap().recv().await
                                    } => {
                                        // Ignore SIGUSR2.
                                    }
                                    Ok(msg) = async move {
                                        listener.recv().await
                                    } => match msg {
                                        CoreAction::Shutdown => break,
                                        CoreAction::Reload => {}
                                    },
                                }
                            }
                            #[cfg(target_family = "windows")]
                            {
                                tokio::select! {
                                    Ok(()) = tokio::signal::ctrl_c() => {
                                        break
                                    }
                                }
                            }
                        }
                        info!("Signal received, shutting down");
                        sctx.shutdown().await;
                    }
                    Err(_) => {
                        error!("Failed to start server core!");
                        return ExitCode::FAILURE;
                    }
                }
                info!("Stopped 🛑 ");
            }
        }
        KanidmdOpt::CertGenerate => {
            info!("Running in certificate generate mode ...");
            cert_generate_core(&config);
        }
        KanidmdOpt::Database {
            commands: DbCommands::Backup(bopt),
        } => {
            info!("Running in backup mode ...");

            backup_server_core(&config, Some(&bopt.path));
        }
        KanidmdOpt::Database {
            commands: DbCommands::Restore(ropt),
        } => {
            info!("Running in restore mode ...");
            restore_server_core(&config, &ropt.path).await;
        }
        KanidmdOpt::Database {
            commands: DbCommands::Verify,
        } => {
            info!("Running in db verification mode ...");
            verify_server_core(&config).await;
        }
        KanidmdOpt::ShowReplicationCertificate => {
            info!("Running show replication certificate ...");
            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::ShowReplicationCertificate,
            )
            .await;
        }
        KanidmdOpt::RenewReplicationCertificate => {
            info!("Running renew replication certificate ...");
            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RenewReplicationCertificate,
            )
            .await;
        }
        KanidmdOpt::RefreshReplicationConsumer { proceed } => {
            info!("Running refresh replication consumer ...");
            if !proceed {
                error!("Unwilling to proceed. Check --help.");
            } else {
                submit_admin_req_human(
                    config.adminbindpath.as_str(),
                    AdminTaskRequest::RefreshReplicationConsumer,
                )
                .await;
            }
        }
        KanidmdOpt::RecoverAccount { name } => {
            info!("Running account recovery ...");

            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::RecoverAccount {
                    name: name.to_owned(),
                },
            )
            .await;
        }
        KanidmdOpt::DisableAccount { name } => {
            info!("Running account disable ...");

            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DisableAccount {
                    name: name.to_owned(),
                },
            )
            .await;
        }
        KanidmdOpt::Database {
            commands: DbCommands::Reindex,
        } => {
            info!("Running in reindex mode ...");
            reindex_server_core(&config).await;
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndexes,
        } => {
            info!("👀 db scan - list indexes");
            dbscan_list_indexes_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListId2Entry,
        } => {
            info!("👀 db scan - list id2entry");
            dbscan_list_id2entry_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndexAnalysis,
        } => {
            info!("👀 db scan - list index analysis");
            dbscan_list_index_analysis_core(&config);
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListIndex(dopt),
        } => {
            info!("👀 db scan - list index content - {}", dopt.index_name);
            dbscan_list_index_core(&config, dopt.index_name.as_str());
        }
        KanidmdOpt::DbScan {
            commands: DbScanOpt::GetId2Entry(dopt),
        } => {
            info!("👀 db scan - get id2 entry - {}", dopt.id);
            dbscan_get_id2entry_core(&config, dopt.id);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::QuarantineId2Entry { id },
        } => {
            info!("☣️ db scan - quarantine id2 entry - {}", id);
            dbscan_quarantine_id2entry_core(&config, *id);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::ListQuarantined,
        } => {
            info!("☣️ db scan - list quarantined");
            dbscan_list_quarantined_core(&config);
        }

        KanidmdOpt::DbScan {
            commands: DbScanOpt::RestoreQuarantined { id },
        } => {
            info!("☣️ db scan - restore quarantined entry - {}", id);
            dbscan_restore_quarantined_core(&config, *id);
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Change,
        } => {
            info!("Running in domain name change mode ... this may take a long time ...");
            domain_rename_core(&config).await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Show,
        } => {
            info!("Running domain show ...");

            submit_admin_req_human(config.adminbindpath.as_str(), AdminTaskRequest::DomainShow)
                .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::UpgradeCheck,
        } => {
            info!("Running domain upgrade check ...");

            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainUpgradeCheck,
            )
            .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Raise,
        } => {
            info!("Running domain raise ...");

            submit_admin_req_human(config.adminbindpath.as_str(), AdminTaskRequest::DomainRaise)
                .await;
        }

        KanidmdOpt::DomainSettings {
            commands: DomainSettingsCmds::Remigrate { level },
        } => {
            info!("⚠️ Running domain remigrate ...");

            submit_admin_req_human(
                config.adminbindpath.as_str(),
                AdminTaskRequest::DomainRemigrate { level: *level },
            )
            .await;
        }

        KanidmdOpt::Database {
            commands: DbCommands::Vacuum,
        } => {
            info!("Running in vacuum mode ...");
            vacuum_server_core(&config);
        }
        KanidmdOpt::Scripting { .. } | KanidmdOpt::Version => {}
    }
    ExitCode::SUCCESS
}